repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
andaag/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """Validate X and Y before computing pairwise distances.

    If Y is None, it is set as a pointer to X (i.e. not a copy); if Y is
    given, no such aliasing happens.  All pairwise distance metrics should
    call this first to make the inputs safe to use: both arrays are
    converted to at-least-2d float arrays (dense, or CSR when sparse), and
    their second dimensions are required to match.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array.  If Y was None, safe_Y will be a pointer to X.
    """
    X, Y, dtype = _return_float_dtype(X, Y)

    if Y is None or Y is X:
        # Validate once and share the result between X and Y.
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = X
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype)

    n_features_X = X.shape[1]
    n_features_Y = Y.shape[1]
    if n_features_X != n_features_Y:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             n_features_X, n_features_Y))

    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y before computing paired distances.

    All paired distance metrics should call this first to make the inputs
    safe to use.  Both arrays go through the same validation as for
    pairwise distances, and additionally their full shapes (not just the
    feature dimension) must match, since distances are taken row-by-row.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array.  If Y was None, safe_Y will be a pointer to X.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    raise ValueError("X and Y should be of same shape. They were "
                     "respectively %r and %r long." % (X.shape, Y.shape))
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::
        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if x varies but y remains unchanged, then the right-most dot
    product `dot(y, y)` can be pre-computed.
    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)
    if Y_norm_squared is not None:
        # User-supplied squared norms of Y's rows; must be a (1, n_samples_2)
        # row vector once validated by check_array.
        YY = check_array(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        XX = YY.T
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    # Quadratic expansion: ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2,
    # accumulated in place on the dense Gram matrix.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Clip small negative values caused by floating point rounding in the
    # expansion above.
    np.maximum(distances, 0, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        # ``flat[::n+1]`` addresses the diagonal of the square matrix.
        distances.flat[::distances.shape[0] + 1] = 0.0
    # np.sqrt with out= keeps the operation in place (no extra allocation).
    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y
    which is closest (according to the specified distance). The minimal
    distances are also returned.

    This is mostly equivalent to calling:
        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable, default 'euclidean'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays as input and return one value
        indicating the distance between them. This works for Scipy's
        metrics, but is less efficient than passing the metric name as a
        string.  Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.
    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")

    X, Y = check_pairwise_arrays(X, Y)

    if metric_kwargs is None:
        metric_kwargs = {}

    if axis == 0:
        # argmin over X's rows instead of Y's: swap the operands.
        X, Y = Y, X

    # Allocate output arrays
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    # ``np.infty`` was an alias removed in NumPy 2.0; ``np.inf`` is the
    # canonical spelling of the same value.
    values.fill(np.inf)

    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # Squared distances via the quadratic expansion; the
                    # square root is applied once at the end.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)

            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]

            # chunk_x is a slice, so indices[chunk_x] / values[chunk_x] are
            # views: the masked assignments below write through to the
            # output arrays.
            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]

    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # Undo the squared-distance shortcut, in place.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y
    which is closest (according to the specified distance).

    This is mostly equivalent to calling:
        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
    but uses much less memory, and is faster for large arrays.
    This function works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    Y : array-like
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays as input and return one value
        indicating the distance between them.  Distance matrices are not
        supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict
        keyword arguments to pass to specified metric function.
    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    if metric_kwargs is None:
        metric_kwargs = {}

    # Delegate to the argmin+min variant and discard the distances.
    indices, _ = pairwise_distances_argmin_min(X, Y, axis, metric,
                                               batch_size, metric_kwargs)
    return indices
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.
    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0.,  2.],
           [ 4.,  4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)

        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        result = np.zeros((X.shape[0], Y.shape[0]))
        # Cython routine accumulates the sparse L1 distances into ``result``.
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], result)
        return result

    if sum_over_features:
        # Dense summed case: delegate to scipy's C implementation.
        return distance.cdist(X, Y, 'cityblock')

    # Componentwise case: broadcast to (n_samples_X, n_samples_Y,
    # n_features), take absolute differences in place, then flatten the
    # first two axes.
    diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    np.abs(diff, out=diff)
    return diff.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.
    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Equivalent to 1.0 - cosine_similarity(X, Y), computed without an
    # extra temporary for the subtraction.
    dist = -cosine_similarity(X, Y)
    dist += 1
    return dist
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the paired euclidean distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # The L2 norm of each row difference is the euclidean distance of the
    # corresponding sample pair.
    diff = X - Y
    return row_norms(diff)
def paired_manhattan_distances(X, Y):
    """Compute the paired L1 distances between the vectors in X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        return np.abs(delta).sum(axis=-1)
    # Sparse path: take |.| on the stored values only, then reduce each
    # row; the matrix-sum result is squeezed down to a 1-d array.
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the paired cosine distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # With unit-norm rows, ||x - y||^2 = 2 (1 - cos(x, y)); halving the
    # squared norm of the difference therefore yields the cosine distance.
    normalized_diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(normalized_diff, squared=True)
# Mapping from metric name to the paired-distance function implementing it;
# used by paired_distances() to resolve string ``metric`` arguments.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute the paired distances between X and Y.

    Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.
    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        # Named metric: dispatch to the dedicated implementation.
        return PAIRED_DISTANCES[metric](X, Y)

    if callable(metric):
        # Check the matrix first (it is usually done by the metric)
        X, Y = check_paired_arrays(X, Y)
        n_samples = len(X)
        distances = np.zeros(n_samples)
        for i in range(n_samples):
            distances[i] = metric(X[i], Y[i])
        return distances

    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """Compute the linear kernel between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # The linear kernel is simply the (dense) Gram matrix X . Y^T.
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
    degree : int, default 3

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    scale = 1.0 / X.shape[1] if gamma is None else gamma

    # Build the kernel in place on the dense Gram matrix.
    K = safe_sparse_dot(X, Y.T, dense_output=True)
    K *= scale
    K += coef0
    K **= degree
    return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    scale = 1.0 / X.shape[1] if gamma is None else gamma

    # Affine transform of the Gram matrix followed by an in-place tanh.
    K = safe_sparse_dot(X, Y.T, dense_output=True)
    K *= scale
    K += coef0
    np.tanh(K, out=K)
    return K
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.
    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    scale = 1.0 / X.shape[1] if gamma is None else gamma

    # Start from squared euclidean distances and exponentiate in place.
    K = euclidean_distances(X, Y, squared=True)
    np.multiply(K, -scale, out=K)
    np.exp(K, out=K)
    return K
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.
    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.
    Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.
    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)

    X_normalized = normalize(X, copy=True)
    # Reuse the normalized X when Y aliases X (the Y=None case).
    Y_normalized = X_normalized if X is Y else normalize(Y, copy=True)

    return safe_sparse_dot(X_normalized, Y_normalized.T,
                           dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.
    X and Y have to be non-negative. This kernel is most commonly applied to
    histograms.  The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.
    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # Reject sparse inputs before any conversion happens.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")

    # The Cython helper fills ``kernel`` in place.
    kernel = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, kernel)
    return kernel
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y.
    X and Y have to be non-negative. This kernel is most commonly applied to
    histograms.  The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.
    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # Scale the (negative) additive chi2 kernel and exponentiate in place.
    kernel = additive_chi2_kernel(X, Y)
    kernel *= gamma
    np.exp(kernel, out=kernel)
    return kernel
# Helper functions - distance
# Mapping from metric name to the pairwise-distance function implementing
# it; consulted by pairwise_distances() and pairwise_distances_argmin_min().
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Return the mapping of valid metric names for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel."""
    if n_jobs < 0:
        # Negative n_jobs counts back from the number of CPUs.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)

    # TODO: in some cases, backend='threading' may be appropriate
    delayed_func = delayed(func)
    results = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed_func(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    # Each job produced the columns for its slice of Y; stitch them back.
    return np.hstack(results)
def _pairwise_callable(X, Y, metric, **kwds):
    """Handle the callable case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y)

    if X is Y:
        n_samples = X.shape[0]
        # Only calculate metric for upper triangle
        out = np.zeros((n_samples, n_samples), dtype='float')
        for i, j in itertools.combinations(range(n_samples), 2):
            out[i, j] = metric(X[i], Y[j], **kwds)

        # Make symmetric
        # NB: out += out.T will produce incorrect results
        out = out + out.T

        # Calculate diagonal
        # NB: nonzero diagonals are allowed for both metrics and kernels
        for i in range(n_samples):
            row = X[i]
            out[i, i] = metric(row, row, **kwds)
    else:
        # Calculate all cells
        out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
        for i, j in itertools.product(range(X.shape[0]), range(Y.shape[0])):
            out[i, j] = metric(X[i], Y[j], **kwds)

    return out
# Metric names accepted by pairwise_distances: the scikit-learn
# implementations in PAIRWISE_DISTANCE_FUNCTIONS plus the names handled
# by scipy.spatial.distance.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.

    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.
    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.

    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.
    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features], optional
        An optional second feature array. Only allowed if
        metric != "precomputed".
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    # Validate the metric name up front so a typo fails fast.
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))

    if metric == "precomputed":
        # X already is the distance matrix; return it untouched.
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (supports sparse input).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        # Fall back to scipy; it only handles dense arrays.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        X, Y = check_pairwise_arrays(X, Y)
        if n_jobs == 1 and X is Y:
            # pdist computes only the condensed upper triangle; cheaper
            # than cdist for the symmetric single-array case.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
# Maps each kernel-name string accepted by pairwise_kernels() to the
# function implementing it.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """Return the mapping of valid pairwise_kernels metric names.

    This simply exposes ``PAIRWISE_KERNEL_FUNCTIONS``, but gives a place
    for a verbose description of the string-to-function mapping:

    =============== ========================================
    metric          Function
    =============== ========================================
    'additive_chi2' sklearn.pairwise.additive_chi2_kernel
    'chi2'          sklearn.pairwise.chi2_kernel
    'linear'        sklearn.pairwise.linear_kernel
    'poly'          sklearn.pairwise.polynomial_kernel
    'polynomial'    sklearn.pairwise.polynomial_kernel
    'rbf'           sklearn.pairwise.rbf_kernel
    'sigmoid'       sklearn.pairwise.sigmoid_kernel
    'cosine'        sklearn.pairwise.cosine_similarity
    =============== ========================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# For each kernel name, the keyword parameters its kernel function
# accepts; used by pairwise_kernels(filter_params=True) to drop
# irrelevant keyword arguments before dispatching.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": (),
    "cosine": (),
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    Takes either a vector array (kernels are computed) or, with
    metric="precomputed", a kernel matrix (returned unchanged).  This
    provides a safe way to accept a kernel matrix as input while staying
    compatible with the many algorithms that expect a vector array.
    When Y is given (default None), the result holds the pairwise
    kernels between the rows of X and the rows of Y.

    Valid values for metric are::

        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.

    Y : array [n_samples_b, n_features]
        A second feature array, only allowed when X has shape
        [n_samples_a, n_features].

    metric : string, or callable
        The metric to use when calculating kernels between instances in
        a feature array.  A string must name a metric in
        pairwise.PAIRWISE_KERNEL_FUNCTIONS.  "precomputed" means X is
        already a kernel matrix.  A callable is invoked on each pair of
        rows and must return a value indicating their kernel.

    n_jobs : int
        The number of jobs used for the computation; the pairwise matrix
        is split into n_jobs even slices computed in parallel.  -1 uses
        all CPUs, 1 disables parallel code (useful for debugging), and
        for n_jobs below -1, (n_cpus + 1 + n_jobs) CPUs are used.

    filter_params : boolean
        Whether to filter invalid parameters or not.

    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the kernel
        function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of X (or between X's ith row and Y's jth row
        when Y is given).

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        return X
    if metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # keep only the keyword args this kernel actually accepts
            allowed = KERNEL_PARAMS[metric]
            kwds = dict((k, v) for k, v in kwds.items() if k in allowed)
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
jcasner/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/ticker.py | 69 | 37420 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick
locating and formatting. Although the locators know nothing about
major or minor ticks, they are used by the Axis class to support major
and minor tick locating and formatting. Generic tick locators and
formatters are provided, as well as domain specific custom ones.
Tick locating
-------------
The Locator class is the base class for all tick locators. The
locators handle autoscaling of the view limits based on the data
limits, and the choosing of tick locations. A useful semi-automatic
tick locator is MultipleLocator. You initialize this with a base, eg
10, and it picks axis limits and ticks that are multiples of your
base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (eg. where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
    choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, eg no minor ticks on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
class TickHelper:
    # The Axis this helper works against; None until set_axis() or
    # create_dummy_axis() is called.
    axis = None

    class DummyAxis:
        """Minimal stand-in axis holding only data and view intervals."""
        def __init__(self):
            self.dataLim = mtransforms.Bbox.unit()
            self.viewLim = mtransforms.Bbox.unit()

        def get_view_interval(self):
            return self.viewLim.intervalx

        def set_view_interval(self, vmin, vmax):
            self.viewLim.intervalx = vmin, vmax

        def get_data_interval(self):
            return self.dataLim.intervalx

        def set_data_interval(self, vmin, vmax):
            self.dataLim.intervalx = vmin, vmax

    def set_axis(self, axis):
        self.axis = axis

    def create_dummy_axis(self):
        # Only build a stand-in when no real axis has been attached yet.
        if self.axis is None:
            self.axis = self.DummyAxis()

    def set_view_interval(self, vmin, vmax):
        self.axis.set_view_interval(vmin, vmax)

    def set_data_interval(self, vmin, vmax):
        self.axis.set_data_interval(vmin, vmax)

    def set_bounds(self, vmin, vmax):
        # Convenience: pin both the view and data intervals to one range.
        self.set_view_interval(vmin, vmax)
        self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
    """
    Convert the tick location to a string
    """
    # Some subclasses need to see the full list of tick locations to
    # format each individual one; set_locs() stores them here.
    locs = []

    def __call__(self, x, pos=None):
        """Return the label for tick value *x* at position *pos*;
        pos=None means the position is unspecified.  Subclasses must
        override this method."""
        raise NotImplementedError('Derived must overide')

    def format_data(self, value):
        return self.__call__(value)

    def format_data_short(self, value):
        'return a short string version'
        return self.format_data(value)

    def get_offset(self):
        return ''

    def set_locs(self, locs):
        self.locs = locs

    def fix_minus(self, s):
        """
        Hook allowing subclasses to replace a hyphen-for-minus with the
        proper unicode symbol.  The default is to return *s* unchanged.

        Note: if you override this (e.g. in :meth:`format_data` or
        ``__call__``), you probably do not want it applied in
        :meth:`format_data_short`, which the toolbar uses for
        interactive coordinate reporting where cross-platform GUI
        unicode handling is uncertain; such classes should provide an
        explicit :meth:`format_data_short`.
        """
        return s
class NullFormatter(Formatter):
    """Formatter that labels every tick with the empty string."""
    def __call__(self, x, pos=None):
        'Return the label for tick val *x* at position *pos* (always empty)'
        return ''
class FixedFormatter(Formatter):
    """Label ticks with fixed, preset strings."""
    def __init__(self, seq):
        """
        *seq* is a sequence of strings; position *i* is labelled with
        *seq[i]* regardless of the tick value.  Positions past the end
        of *seq* (or an unspecified position) get the empty string.
        """
        self.seq = seq
        self.offset_string = ''

    def __call__(self, x, pos=None):
        'Return the label for tick val *x* at position *pos*'
        if pos is not None and pos < len(self.seq):
            return self.seq[pos]
        return ''

    def get_offset(self):
        return self.offset_string

    def set_offset_string(self, ofs):
        self.offset_string = ofs
class FuncFormatter(Formatter):
    """Delegate tick formatting to a user-supplied callable."""
    def __init__(self, func):
        # func(x, pos) -> label string
        self.func = func

    def __call__(self, x, pos=None):
        'Return the label for tick val *x* at position *pos*'
        return self.func(x, pos)
class FormatStrFormatter(Formatter):
    """Format each tick value with a printf-style format string."""
    def __init__(self, fmt):
        self.fmt = fmt

    def __call__(self, x, pos=None):
        'Return the label for tick val *x* at position *pos*'
        return self.fmt % x
class OldScalarFormatter(Formatter):
    """
    Tick location is a plain old number.
    """
    def __call__(self, x, pos=None):
        'Return the format for tick val *x* at position *pos*'
        # precision is chosen from the current view span, not from x itself
        xmin, xmax = self.axis.get_view_interval()
        d = abs(xmax - xmin)
        return self.pprint_val(x,d)
    def pprint_val(self, x, d):
        # Pretty-print *x*; *d* is the axis span used to pick precision.
        #if the number is not too big and it's an int, format it as an
        #int
        if abs(x)<1e4 and x==int(x): return '%d' % x
        # narrow or very wide spans fall back to scientific notation;
        # in between, wider spans get fewer decimals
        if d < 1e-2: fmt = '%1.3e'
        elif d < 1e-1: fmt = '%1.3f'
        elif d > 1e5: fmt = '%1.1e'
        elif d > 10 : fmt = '%1.1f'
        elif d > 1 : fmt = '%1.2f'
        else: fmt = '%1.3f'
        s = fmt % x
        # tidy the result: strip trailing zeros from the mantissa and
        # the '+' sign / leading zeros from a scientific exponent
        tup = s.split('e')
        if len(tup)==2:
            mantissa = tup[0].rstrip('0').rstrip('.')
            sign = tup[1][0].replace('+', '')
            exponent = tup[1][1:].lstrip('0')
            s = '%se%s%s' %(mantissa, sign, exponent)
        else:
            s = s.rstrip('0').rstrip('.')
        return s
class ScalarFormatter(Formatter):
    """
    Tick location is a plain old number. If useOffset==True and the data range
    is much smaller than the data average, then an offset will be determined
    such that the tick labels are meaningful. Scientific notation is used for
    data < 1e-3 or data >= 1e4.
    """
    def __init__(self, useOffset=True, useMathText=False):
        # useOffset allows plotting small data ranges with large offsets:
        # for example: [1+1e-9,1+2e-9,1+3e-9]
        # useMathText will render the offset and scientific notation in mathtext
        self._useOffset = useOffset
        self._usetex = rcParams['text.usetex']
        self._useMathText = useMathText
        self.offset = 0               # additive offset factored out of labels
        self.orderOfMagnitude = 0     # power of ten factored out of labels
        self.format = ''              # %-format chosen by _set_format()
        self._scientific = True
        self._powerlimits = rcParams['axes.formatter.limits']
    def fix_minus(self, s):
        'use a unicode minus rather than hyphen'
        # usetex renders its own minus; axes.unicode_minus=False opts out
        if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']: return s
        else: return s.replace('-', u'\u2212')
    def __call__(self, x, pos=None):
        'Return the format for tick val *x* at position *pos*'
        if len(self.locs)==0:
            return ''
        else:
            s = self.pprint_val(x)
            return self.fix_minus(s)
    def set_scientific(self, b):
        '''True or False to turn scientific notation on or off
        see also :meth:`set_powerlimits`
        '''
        self._scientific = bool(b)
    def set_powerlimits(self, lims):
        '''
        Sets size thresholds for scientific notation.
        e.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in
        which scientific notation is used for numbers less than
        1e-3 or greater than 1e4.
        See also :meth:`set_scientific`.
        '''
        assert len(lims) == 2, "argument must be a sequence of length 2"
        self._powerlimits = lims
    def format_data_short(self,value):
        'return a short formatted string representation of a number'
        return '%1.3g'%value
    def format_data(self,value):
        'return a formatted string representation of a number'
        s = self._formatSciNotation('%1.10e'% value)
        return self.fix_minus(s)
    def get_offset(self):
        """Return scientific notation, plus offset"""
        if len(self.locs)==0: return ''
        s = ''
        if self.orderOfMagnitude or self.offset:
            offsetStr = ''
            sciNotStr = ''
            if self.offset:
                offsetStr = self.format_data(self.offset)
                if self.offset > 0: offsetStr = '+' + offsetStr
            if self.orderOfMagnitude:
                if self._usetex or self._useMathText:
                    sciNotStr = self.format_data(10**self.orderOfMagnitude)
                else:
                    sciNotStr = '1e%d'% self.orderOfMagnitude
            # assemble '<sci notation><offset>' in mathtext, tex or plain form
            if self._useMathText:
                if sciNotStr != '':
                    sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
                s = ''.join(('$',sciNotStr,r'\mathdefault{',offsetStr,'}$'))
            elif self._usetex:
                if sciNotStr != '':
                    sciNotStr = r'\times%s' % sciNotStr
                s = ''.join(('$',sciNotStr,offsetStr,'$'))
            else:
                s = ''.join((sciNotStr,offsetStr))
        return self.fix_minus(s)
    def set_locs(self, locs):
        'set the locations of the ticks'
        self.locs = locs
        if len(self.locs) > 0:
            vmin, vmax = self.axis.get_view_interval()
            d = abs(vmax-vmin)
            if self._useOffset: self._set_offset(d)
            self._set_orderOfMagnitude(d)
            self._set_format()
    def _set_offset(self, range):
        # offset of 20,001 is 20,000, for example
        locs = self.locs
        if locs is None or not len(locs) or range == 0:
            self.offset = 0
            return
        ave_loc = np.mean(locs)
        if ave_loc: # dont want to take log10(0)
            ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
            range_oom = math.floor(math.log10(range))
            # only use an offset when the data magnitude dwarfs the span
            if np.absolute(ave_oom-range_oom) >= 3: # four sig-figs
                if ave_loc < 0:
                    self.offset = math.ceil(np.max(locs)/10**range_oom)*10**range_oom
                else:
                    self.offset = math.floor(np.min(locs)/10**(range_oom))*10**(range_oom)
            else: self.offset = 0
    def _set_orderOfMagnitude(self,range):
        # if scientific notation is to be used, find the appropriate exponent
        # if using an numerical offset, find the exponent after applying the offset
        if not self._scientific:
            self.orderOfMagnitude = 0
            return
        locs = np.absolute(self.locs)
        if self.offset: oom = math.floor(math.log10(range))
        else:
            if locs[0] > locs[-1]: val = locs[0]
            else: val = locs[-1]
            if val == 0: oom = 0
            else: oom = math.floor(math.log10(val))
        # factor the power of ten out of the labels only beyond the limits
        if oom <= self._powerlimits[0]:
            self.orderOfMagnitude = oom
        elif oom >= self._powerlimits[1]:
            self.orderOfMagnitude = oom
        else:
            self.orderOfMagnitude = 0
    def _set_format(self):
        # set the format string to format all the ticklabels
        # The floating point black magic (adding 1e-15 and formatting
        # to 8 digits) may warrant review and cleanup.
        locs = (np.asarray(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15
        sigfigs = [len(str('%1.8f'% loc).split('.')[1].rstrip('0')) \
                   for loc in locs]
        sigfigs.sort()
        # use the largest number of significant decimals any tick needs
        self.format = '%1.' + str(sigfigs[-1]) + 'f'
        if self._usetex:
            self.format = '$%s$' % self.format
        elif self._useMathText:
            self.format = '$\mathdefault{%s}$' % self.format
    def pprint_val(self, x):
        # apply the shared offset and magnitude, then format
        xp = (x-self.offset)/10**self.orderOfMagnitude
        if np.absolute(xp) < 1e-8: xp = 0
        return self.format % xp
    def _formatSciNotation(self, s):
        # transform 1e+004 into 1e4, for example
        tup = s.split('e')
        try:
            significand = tup[0].rstrip('0').rstrip('.')
            sign = tup[1][0].replace('+', '')
            exponent = tup[1][1:].lstrip('0')
            if self._useMathText or self._usetex:
                if significand == '1':
                    # reformat 1x10^y as 10^y
                    significand = ''
                if exponent:
                    exponent = '10^{%s%s}'%(sign, exponent)
                if significand and exponent:
                    return r'%s{\times}%s'%(significand, exponent)
                else:
                    return r'%s%s'%(significand, exponent)
            else:
                s = ('%se%s%s' %(significand, sign, exponent)).rstrip('e')
                return s
        except IndexError, msg:
            # input was not in exponential form; return it unchanged
            return s
class LogFormatter(Formatter):
    """
    Format values for log axis;
    if attribute *decadeOnly* is True, only the decades will be labelled.
    """
    def __init__(self, base=10.0, labelOnlyBase = True):
        """
        *base* is used to locate the decade tick,
        which will be the only one to be labeled if *labelOnlyBase*
        is ``True``
        """
        self._base = base+0.0
        self.labelOnlyBase=labelOnlyBase
        # NOTE(review): decadeOnly is set but not read in this class —
        # presumably consumed elsewhere; verify before relying on it.
        self.decadeOnly = True
    def base(self,base):
        'change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`'
        self._base=base
    def label_minor(self,labelOnlyBase):
        'switch on/off minor ticks labeling'
        self.labelOnlyBase=labelOnlyBase
    def __call__(self, x, pos=None):
        'Return the format for tick val *x* at position *pos*'
        vmin, vmax = self.axis.get_view_interval()
        d = abs(vmax - vmin)
        b=self._base
        if x == 0.0:
            return '0'
        sign = np.sign(x)
        # only label the decades
        fx = math.log(abs(x))/math.log(b)
        isDecade = self.is_decade(fx)
        if not isDecade and self.labelOnlyBase: s = ''
        elif x>10000: s= '%1.0e'%x
        elif x<1: s = '%1.0e'%x
        else : s = self.pprint_val(x,d)
        if sign == -1:
            s = '-%s' % s
        return self.fix_minus(s)
    def format_data(self,value):
        # temporarily label everything, not just decades
        self.labelOnlyBase = False
        value = cbook.strip_math(self.__call__(value))
        self.labelOnlyBase = True
        return value
    def format_data_short(self,value):
        'return a short formatted string representation of a number'
        return '%1.3g'%value
    def is_decade(self, x):
        # x (a log-space value) is a decade if it is (nearly) an integer
        n = self.nearest_long(x)
        return abs(x-n)<1e-10
    def nearest_long(self, x):
        # round-half-away-from-zero to a (Python 2) long
        if x==0: return 0L
        elif x>0: return long(x+0.5)
        else: return long(x-0.5)
    def pprint_val(self, x, d):
        # Pretty-print *x*; *d* is the axis span used to pick precision.
        #if the number is not too big and it's an int, format it as an
        #int
        if abs(x)<1e4 and x==int(x): return '%d' % x
        if d < 1e-2: fmt = '%1.3e'
        elif d < 1e-1: fmt = '%1.3f'
        elif d > 1e5: fmt = '%1.1e'
        elif d > 10 : fmt = '%1.1f'
        elif d > 1 : fmt = '%1.2f'
        else: fmt = '%1.3f'
        s = fmt % x
        # tidy scientific notation: strip trailing zeros, '+' and
        # leading exponent zeros
        tup = s.split('e')
        if len(tup)==2:
            mantissa = tup[0].rstrip('0').rstrip('.')
            sign = tup[1][0].replace('+', '')
            exponent = tup[1][1:].lstrip('0')
            s = '%se%s%s' %(mantissa, sign, exponent)
        else:
            s = s.rstrip('0').rstrip('.')
        return s
class LogFormatterExponent(LogFormatter):
    """
    Format values for log axis; using ``exponent = log_base(value)``
    """
    def __call__(self, x, pos=None):
        'Return the format for tick val *x* at position *pos*'
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
        span = abs(vmax - vmin)
        base = self._base
        if x == 0:
            return '0'
        sign = np.sign(x)
        # the label shows the exponent: fx = log_base(|x|)
        fx = math.log(abs(x)) / math.log(base)
        if not self.is_decade(fx) and self.labelOnlyBase:
            s = ''
        elif fx > 10000 or fx < 1:
            # very large or small exponents in scientific notation
            s = '%1.0e' % fx
        else:
            s = self.pprint_val(fx, span)
        if sign == -1:
            s = '-%s' % s
        return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
    """
    Format values for log axis; using ``exponent = log_base(value)``
    """
    def __call__(self, x, pos=None):
        'Return the format for tick val *x* at position *pos*'
        base = self._base
        if x == 0:
            return '$0$'
        sign_string = '-' if np.sign(x) == -1 else ''
        # render the tick as base**fx in mathtext
        fx = math.log(abs(x)) / math.log(base)
        is_decade = self.is_decade(fx)
        usetex = rcParams['text.usetex']
        if not is_decade and self.labelOnlyBase:
            return ''
        if not is_decade:
            # fractional exponent: show two decimals
            if usetex:
                return r'$%s%d^{%.2f}$' % (sign_string, base, fx)
            return '$\mathdefault{%s%d^{%.2f}}$' % (sign_string, base, fx)
        if usetex:
            return r'$%s%d^{%d}$' % (sign_string, base, self.nearest_long(fx))
        return r'$\mathdefault{%s%d^{%d}}$' % (sign_string, base,
                                               self.nearest_long(fx))
class Locator(TickHelper):
    """
    Determine the tick locations.

    Note: do not share one locator between different
    :class:`~matplotlib.axis.Axis` instances — the locator stores
    references to that axis's data and view limits.
    """
    def __call__(self):
        'Return the locations of the ticks'
        raise NotImplementedError('Derived must override')

    def view_limits(self, vmin, vmax):
        """
        select a scale for the range from vmin to vmax
        Normally This will be overridden.
        """
        return mtransforms.nonsingular(vmin, vmax)

    def autoscale(self):
        'autoscale the view limits'
        return self.view_limits(*self.axis.get_view_interval())

    def pan(self, numsteps):
        'Pan numticks (can be positive or negative)'
        ticks = self()
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
        if len(ticks) > 2:
            # shift by whole tick intervals
            shift = numsteps * abs(ticks[0] - ticks[1])
        else:
            # too few ticks: shift by a sixth of the span per step
            shift = numsteps * abs(vmax - vmin) / 6.
        self.axis.set_view_interval(vmin + shift, vmax + shift, ignore=True)

    def zoom(self, direction):
        "Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
        step = 0.1 * abs(vmax - vmin) * direction
        self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)

    def refresh(self):
        'refresh internal information based on current lim'
        pass
class IndexLocator(Locator):
    """
    Place a tick on every multiple of some base number of points
    plotted, e.g. on every 5th point.  Assumes index plotting, i.e. the
    axis spans 0..len(data); mainly useful for x ticks.
    """
    def __init__(self, base, offset):
        'place ticks on the i-th data points where (i-offset)%base==0'
        self._base = base
        self.offset = offset

    def __call__(self):
        'Return the locations of the ticks'
        dmin, dmax = self.axis.get_data_interval()
        return np.arange(dmin + self.offset, dmax + 1, self._base)
class FixedLocator(Locator):
    """
    Tick locations are fixed.  If *nbins* is not None, the array of
    possible positions is subsampled to keep the number of ticks
    <= nbins + 1.
    """
    def __init__(self, locs, nbins=None):
        self.locs = locs
        # a cap below 2 would leave fewer than two ticks
        self.nbins = None if nbins is None else max(nbins, 2)

    def __call__(self):
        'Return the locations of the ticks'
        if self.nbins is None:
            return self.locs
        stride = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
        return self.locs[::stride]
class NullLocator(Locator):
    """Locator that produces no ticks at all."""
    def __call__(self):
        'Return the locations of the ticks (always empty)'
        return []
class LinearLocator(Locator):
    """
    Determine the tick locations.

    The first call tries to pick a tick count giving a nice
    partitioning; afterwards the count stays fixed so that interactive
    navigation behaves smoothly.
    """
    def __init__(self, numticks=None, presets=None):
        """
        Use presets to set locs based on lom. A dict mapping vmin, vmax->locs
        """
        self.numticks = numticks
        self.presets = {} if presets is None else presets

    def __call__(self):
        'Return the locations of the ticks'
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        # exact-interval presets take precedence over computed ticks
        if (vmin, vmax) in self.presets:
            return self.presets[(vmin, vmax)]
        if self.numticks is None:
            self._set_numticks()
        if self.numticks == 0:
            return []
        return np.linspace(vmin, vmax, self.numticks)

    def _set_numticks(self):
        self.numticks = 11  # todo; be smart here; this is just for dev

    def view_limits(self, vmin, vmax):
        'Try to choose the view limits intelligently'
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        if vmin == vmax:
            vmin -= 1
            vmax += 1
        # round the limits outward at a resolution matching the span
        exponent, remainder = divmod(math.log10(vmax - vmin), 1)
        if remainder < 0.5:
            exponent -= 1
        scale = 10 ** (-exponent)
        vmin = math.floor(scale * vmin) / scale
        vmax = math.ceil(scale * vmax) / scale
        return mtransforms.nonsingular(vmin, vmax)
def closeto(x, y):
    'Return True when *x* and *y* differ by less than 1e-10.'
    return abs(x - y) < 1e-10
class Base:
    'this solution has some hacks to deal with floating point inaccuracies'

    def __init__(self, base):
        # non-positive bases make no sense for multiple-of-base ticking
        assert(base > 0)
        self._base = base

    def lt(self, x):
        'return the largest multiple of base < x'
        quotient, remainder = divmod(x, self._base)
        if closeto(remainder, 0) and not closeto(remainder / self._base, 1):
            # x sits exactly on a multiple: step down one whole base
            return (quotient - 1) * self._base
        return quotient * self._base

    def le(self, x):
        'return the largest multiple of base <= x'
        quotient, remainder = divmod(x, self._base)
        if closeto(remainder / self._base, 1):
            # remainder ~= base: floating point error, round up instead
            return (quotient + 1) * self._base
        return quotient * self._base

    def gt(self, x):
        'return the smallest multiple of base > x'
        quotient, remainder = divmod(x, self._base)
        if closeto(remainder / self._base, 1):
            # remainder ~= base: floating point error, skip an extra step
            return (quotient + 2) * self._base
        return (quotient + 1) * self._base

    def ge(self, x):
        'return the smallest multiple of base >= x'
        quotient, remainder = divmod(x, self._base)
        if closeto(remainder, 0) and not closeto(remainder / self._base, 1):
            # x already sits on a multiple
            return quotient * self._base
        return (quotient + 1) * self._base

    def get_base(self):
        return self._base
class MultipleLocator(Locator):
    """
    Set a tick on every integer that is multiple of base in the
    view interval
    """
    def __init__(self, base=1.0):
        self._base = Base(base)

    def __call__(self):
        'Return the locations of the ticks'
        vmin, vmax = self.axis.get_view_interval()
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        start = self._base.ge(vmin)
        step = self._base.get_base()
        # small fudge so a tick landing exactly on vmax is kept
        count = (vmax - start + 0.001 * step) // step
        return start + np.arange(count + 1) * step

    def view_limits(self, dmin, dmax):
        """
        Set the view limits to the nearest multiples of base that
        contain the data
        """
        vmin = self._base.le(dmin)
        vmax = self._base.ge(dmax)
        if vmin == vmax:
            vmin -= 1
            vmax += 1
        return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n=1, threshold=100):
    """Return (scale, offset) for ticking the interval [vmin, vmax].

    *scale* is the power of ten matching the span divided into *n*
    intervals; *offset* is a power of ten subtracted from the labels
    when the interval's mean is at least *threshold* times its span
    (0 otherwise).  A degenerate interval yields (1.0, 0.0).
    """
    dv = abs(vmax - vmin)
    maxabsv = max(abs(vmin), abs(vmax))
    if maxabsv == 0 or dv / maxabsv < 1e-12:
        # span is zero (or negligible relative to the values)
        return 1.0, 0.0
    meanv = 0.5 * (vmax + vmin)
    offset = 0
    if abs(meanv) / dv >= threshold:
        # values are far from zero relative to the span: factor out a
        # power-of-ten offset with the sign of the mean
        if meanv > 0:
            offset = 10 ** divmod(math.log10(meanv), 1)[0]
        else:
            offset = -10 ** divmod(math.log10(-meanv), 1)[0]
    scale = 10 ** divmod(math.log10(dv / n), 1)[0]
    return scale, offset
class MaxNLocator(Locator):
    """
    Select no more than N intervals at nice locations.
    """
    def __init__(self, nbins = 10, steps = None,
                 trim = True,
                 integer=False,
                 symmetric=False):
        # nbins:     maximum number of intervals
        # steps:     allowed step multiples; a trailing 10 is appended
        #            so the search in bin_boundaries always terminates
        # trim:      drop trailing bins that lie wholly past the data
        # integer:   restrict steps to (near-)integer values
        # symmetric: autoscale symmetrically about zero
        self._nbins = int(nbins)
        self._trim = trim
        self._integer = integer
        self._symmetric = symmetric
        if steps is None:
            self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
        else:
            if int(steps[-1]) != 10:
                steps = list(steps)
                steps.append(10)
            self._steps = steps
        if integer:
            self._steps = [n for n in self._steps if divmod(n,1)[1] < 0.001]
    def bin_boundaries(self, vmin, vmax):
        # Return at most nbins+1 boundaries at 'nice' step multiples
        # covering [vmin, vmax].
        nbins = self._nbins
        scale, offset = scale_range(vmin, vmax, nbins)
        if self._integer:
            scale = max(1, scale)
        vmin -= offset
        vmax -= offset
        raw_step = (vmax-vmin)/nbins
        scaled_raw_step = raw_step/scale
        # pick the smallest allowed step that still covers the range
        for step in self._steps:
            if step < scaled_raw_step:
                continue
            step *= scale
            best_vmin = step*divmod(vmin, step)[0]
            best_vmax = best_vmin + step*nbins
            if (best_vmax >= vmax):
                break
        if self._trim:
            # remove trailing bins that lie entirely beyond vmax
            extra_bins = int(divmod((best_vmax - vmax), step)[0])
            nbins -= extra_bins
        return (np.arange(nbins+1) * step + best_vmin + offset)
    def __call__(self):
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
        return self.bin_boundaries(vmin, vmax)
    def view_limits(self, dmin, dmax):
        if self._symmetric:
            # mirror the limits about zero
            maxabs = max(abs(dmin), abs(dmax))
            dmin = -maxabs
            dmax = maxabs
        dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander = 0.05)
        # snap to the outermost computed tick boundaries
        return np.take(self.bin_boundaries(dmin, dmax), [0,-1])
def decade_down(x, base=10):
    'floor x to the nearest lower decade'
    exponent = math.floor(math.log(x) / math.log(base))
    return base ** exponent
def decade_up(x, base=10):
    'ceil x to the nearest higher decade'
    exponent = math.ceil(math.log(x) / math.log(base))
    return base ** exponent
def is_decade(x, base=10):
    'return True if *x* lands exactly on a decade of *base*'
    exponent = math.log(x) / math.log(base)
    return exponent == int(exponent)
class LogLocator(Locator):
    """
    Determine the tick locations for log axes
    """
    def __init__(self, base=10.0, subs=[1.0]):
        """
        place ticks on the location= base**i*subs[j]
        """
        # the mutable default [1.0] is safe: self.subs() copies it
        self.base(base)
        self.subs(subs)
        self.numticks = 15

    def base(self, base):
        """
        set the base of the log scaling (major tick every base**i, i integer)
        """
        self._base = base + 0.0

    def subs(self, subs):
        """
        set the minor ticks the log scaling every base**i*subs[j]
        """
        if subs is None:
            self._subs = None  # autosub
        else:
            self._subs = np.asarray(subs) + 0.0

    def _set_numticks(self):
        self.numticks = 15  # todo; be smart here; this is just for dev

    def __call__(self):
        'Return the locations of the ticks'
        b = self._base
        vmin, vmax = self.axis.get_view_interval()
        if vmin <= 0.0:
            # log scale needs strictly positive limits; fall back to the
            # smallest positive data value
            vmin = self.axis.get_minpos()
            if vmin <= 0.0:
                raise ValueError(
                    "Data has no positive values, and therefore can not be log-scaled.")
        # work in log-base-b space
        vmin = math.log(vmin)/math.log(b)
        vmax = math.log(vmax)/math.log(b)
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        numdec = math.floor(vmax)-math.ceil(vmin)
        if self._subs is None:  # autosub
            # denser sub-ticks when fewer decades are visible
            if numdec > 10:
                subs = np.array([1.0])
            elif numdec > 6:
                subs = np.arange(2.0, b, 2.0)
            else:
                subs = np.arange(2.0, b)
        else:
            subs = self._subs
        # thin out decades so at most self.numticks major ticks appear
        stride = 1
        while numdec/stride+1 > self.numticks:
            stride += 1
        decades = np.arange(math.floor(vmin),
                            math.ceil(vmax)+stride, stride)
        # BUGFIX: was ``len(subs == 1)`` — the length of an elementwise
        # comparison array, which is truthy for any non-empty subs; the
        # intended test is whether subs is a single entry other than 1.0.
        if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
            ticklocs = []
            for decadeStart in b**decades:
                ticklocs.extend(subs*decadeStart)
        else:
            ticklocs = b**decades
        return np.array(ticklocs)

    def view_limits(self, vmin, vmax):
        'Try to choose the view limits intelligently'
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        minpos = self.axis.get_minpos()
        if minpos <= 0:
            raise ValueError(
                "Data has no positive values, and therefore can not be log-scaled.")
        if vmin <= minpos:
            vmin = minpos
        # expand both ends outward to whole decades
        if not is_decade(vmin, self._base):
            vmin = decade_down(vmin, self._base)
        if not is_decade(vmax, self._base):
            vmax = decade_up(vmax, self._base)
        if vmin == vmax:
            vmin = decade_down(vmin, self._base)
            vmax = decade_up(vmax, self._base)
        result = mtransforms.nonsingular(vmin, vmax)
        return result
class SymmetricalLogLocator(Locator):
    """
    Determine the tick locations for log axes
    """
    def __init__(self, transform, subs=[1.0]):
        """
        place ticks on the location= base**i*subs[j]
        """
        # transform supplies .base and .transform() — presumably a
        # SymmetricalLogTransform; verify against callers.
        self._transform = transform
        self._subs = subs
        self.numticks = 15
    def _set_numticks(self):
        self.numticks = 15 # todo; be smart here; this is just for dev
    def __call__(self):
        'Return the locations of the ticks'
        b = self._transform.base
        vmin, vmax = self.axis.get_view_interval()
        # work in the symlog-transformed coordinate space
        vmin, vmax = self._transform.transform((vmin, vmax))
        if vmax<vmin:
            vmin, vmax = vmax, vmin
        numdec = math.floor(vmax)-math.ceil(vmin)
        if self._subs is None:
            # automatic sub-ticks: denser when fewer decades are shown
            if numdec>10: subs = np.array([1.0])
            elif numdec>6: subs = np.arange(2.0, b, 2.0)
            else: subs = np.arange(2.0, b)
        else:
            subs = np.asarray(self._subs)
        # thin out decades so at most self.numticks are produced
        stride = 1
        while numdec/stride+1 > self.numticks:
            stride += 1
        decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride)
        # negative "decades" mirror to negative tick values via np.sign
        if len(subs) > 1 or subs[0] != 1.0:
            ticklocs = []
            for decade in decades:
                ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))
        else:
            ticklocs = np.sign(decades) * b ** np.abs(decades)
        return np.array(ticklocs)
    def view_limits(self, vmin, vmax):
        'Try to choose the view limits intelligently'
        b = self._transform.base
        if vmax<vmin:
            vmin, vmax = vmax, vmin
        # expand both ends outward to whole decades, sign-aware
        if not is_decade(abs(vmin), b):
            if vmin < 0:
                vmin = -decade_up(-vmin, b)
            else:
                vmin = decade_down(vmin, b)
        if not is_decade(abs(vmax), b):
            if vmax < 0:
                vmax = -decade_down(-vmax, b)
            else:
                vmax = decade_up(vmax, b)
        if vmin == vmax:
            # degenerate interval: widen by a decade on each side
            if vmin < 0:
                vmin = -decade_up(-vmin, b)
                vmax = -decade_down(-vmax, b)
            else:
                vmin = decade_down(vmin, b)
                vmax = decade_up(vmax, b)
        result = mtransforms.nonsingular(vmin, vmax)
        return result
class AutoLocator(MaxNLocator):
    # Thin preset: a MaxNLocator capped at 9 bins whose steps are the
    # "nice" multiples 1, 2, 5 and 10.
    def __init__(self):
        MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class OldAutoLocator(Locator):
    """
    On autoscale this class picks the best MultipleLocator to set the
    view limits and the tick locs.
    """
    def __init__(self):
        # Placeholder until refresh() picks a distance-appropriate locator.
        self._locator = LinearLocator()

    def __call__(self):
        'Return the locations of the ticks'
        self.refresh()
        return self._locator()

    def refresh(self):
        'refresh internal information based on current lim'
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
        self._locator = self.get_locator(abs(vmax - vmin))

    def view_limits(self, vmin, vmax):
        'Try to choose the view limits intelligently'
        self._locator = self.get_locator(abs(vmax - vmin))
        return self._locator.view_limits(vmin, vmax)

    def get_locator(self, d):
        'pick the best locator based on a distance'
        d = abs(d)
        if d <= 0:
            # Degenerate span: fall back to a fixed small step.
            return MultipleLocator(0.2)
        try:
            ld = math.log10(d)
        except OverflowError:
            raise RuntimeError('AutoLocator illegal data interval range')
        # Step is the decade below d, scaled to yield a reasonable count.
        base = 10 ** math.floor(ld)
        if d >= 5 * base:
            ticksize = base
        elif d >= 2 * base:
            ticksize = base / 2.0
        else:
            ticksize = base / 5.0
        return MultipleLocator(ticksize)
# Public API of this module.  NOTE(review): SymmetricalLogLocator and
# OldAutoLocator are defined above but not exported here — confirm whether
# that omission is intentional before adding them.
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
           'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
           'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
           'LogFormatterMathtext', 'Locator', 'IndexLocator',
           'FixedLocator', 'NullLocator', 'LinearLocator',
           'LogLocator', 'AutoLocator', 'MultipleLocator',
           'MaxNLocator', )
| agpl-3.0 |
louispotok/pandas | pandas/tests/test_multilevel.py | 1 | 107731 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
from warnings import catch_warnings
import datetime
import itertools
import pytest
import pytz
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notna, isna, Timestamp
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, product as
cart_product, zip)
import pandas as pd
import pandas._libs.index as _index
class Base(object):
    """Shared fixtures for the MultiIndex-centric test cases below."""

    def setup_method(self, method):
        # 10x3 frame over a two-level ('first', 'second') MultiIndex.
        frame_index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                         ['one', 'two', 'three']],
                                 labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                         [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                                 names=['first', 'second'])
        self.frame = DataFrame(np.random.randn(10, 3), index=frame_index,
                               columns=Index(['A', 'B', 'C'], name='exp'))

        # Degenerate MultiIndex with a single level.
        self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
                                       labels=[[0, 1, 2, 3]], names=['first'])

        # Series fixture over a two-level index, with one missing value.
        level_values = [['bar', 'bar', 'baz', 'baz',
                         'qux', 'qux', 'foo', 'foo'],
                        ['one', 'two', 'one', 'two',
                         'one', 'two', 'one', 'two']]
        series = Series(randn(8),
                        index=MultiIndex.from_tuples(lzip(*level_values)))
        series[3] = np.NaN
        self.series = series

        # Year/month/day frame built by grouping a time-indexed frame.
        tm.N = 100
        self.tdf = tm.makeTimeDataFrame()
        self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
                                     lambda x: x.day]).sum()
        # use Int64Index, to make sure things work
        self.ymd.index.set_levels([lev.astype('i8')
                                   for lev in self.ymd.index.levels],
                                  inplace=True)
        self.ymd.index.set_names(['year', 'month', 'day'], inplace=True)
class TestMultiLevel(Base):
    def test_append(self):
        """Appending the two halves of a MultiIndexed frame/series rebuilds it."""
        a, b = self.frame[:5], self.frame[5:]
        result = a.append(b)
        tm.assert_frame_equal(result, self.frame)
        result = a['A'].append(b['A'])
        tm.assert_series_equal(result, self.frame['A'])
    def test_append_index(self):
        """Appending Index and MultiIndex objects yields tuple-valued indexes."""
        idx1 = Index([1.1, 1.2, 1.3])
        idx2 = pd.date_range('2011-01-01', freq='D', periods=3,
                             tz='Asia/Tokyo')
        idx3 = Index(['A', 'B', 'C'])
        midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
        midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
        result = idx1.append(midx_lv2)
        # see gh-7112
        tz = pytz.timezone('Asia/Tokyo')
        expected_tuples = [(1.1, tz.localize(datetime.datetime(2011, 1, 1))),
                           (1.2, tz.localize(datetime.datetime(2011, 1, 2))),
                           (1.3, tz.localize(datetime.datetime(2011, 1, 3)))]
        expected = Index([1.1, 1.2, 1.3] + expected_tuples)
        tm.assert_index_equal(result, expected)
        result = midx_lv2.append(idx1)
        expected = Index(expected_tuples + [1.1, 1.2, 1.3])
        tm.assert_index_equal(result, expected)
        result = midx_lv2.append(midx_lv2)
        expected = MultiIndex.from_arrays([idx1.append(idx1),
                                           idx2.append(idx2)])
        tm.assert_index_equal(result, expected)
        result = midx_lv2.append(midx_lv3)
        tm.assert_index_equal(result, expected)
        result = midx_lv3.append(midx_lv2)
        # Mixed-depth append falls back to a flat Index of tuples.
        expected = Index._simple_new(
            np.array([(1.1, tz.localize(datetime.datetime(2011, 1, 1)), 'A'),
                      (1.2, tz.localize(datetime.datetime(2011, 1, 2)), 'B'),
                      (1.3, tz.localize(datetime.datetime(2011, 1, 3)), 'C')] +
                     expected_tuples), None)
        tm.assert_index_equal(result, expected)
    def test_dataframe_constructor(self):
        """Lists of arrays passed as index/columns become a MultiIndex."""
        multi = DataFrame(np.random.randn(4, 4),
                          index=[np.array(['a', 'a', 'b', 'b']),
                                 np.array(['x', 'y', 'x', 'y'])])
        assert isinstance(multi.index, MultiIndex)
        assert not isinstance(multi.columns, MultiIndex)
        multi = DataFrame(np.random.randn(4, 4),
                          columns=[['a', 'a', 'b', 'b'],
                                   ['x', 'y', 'x', 'y']])
        assert isinstance(multi.columns, MultiIndex)
    def test_series_constructor(self):
        """Lists of arrays/lists passed as a Series index become a MultiIndex."""
        multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']), np.array(
            ['x', 'y', 'x', 'y'])])
        assert isinstance(multi.index, MultiIndex)
        multi = Series(1., index=[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
        assert isinstance(multi.index, MultiIndex)
        multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
                                         ['x', 'y', 'x', 'y']])
        assert isinstance(multi.index, MultiIndex)
    def test_reindex_level(self):
        """reindex(..., level=) broadcasts level aggregates like a transform."""
        # axis=0
        month_sums = self.ymd.sum(level='month')
        result = month_sums.reindex(self.ymd.index, level=1)
        expected = self.ymd.groupby(level='month').transform(np.sum)
        tm.assert_frame_equal(result, expected)
        # Series
        result = month_sums['A'].reindex(self.ymd.index, level=1)
        expected = self.ymd['A'].groupby(level='month').transform(np.sum)
        tm.assert_series_equal(result, expected, check_names=False)
        # axis=1
        month_sums = self.ymd.T.sum(axis=1, level='month')
        result = month_sums.reindex(columns=self.ymd.index, level=1)
        expected = self.ymd.groupby(level='month').transform(np.sum).T
        tm.assert_frame_equal(result, expected)
    def test_binops_level(self):
        """Arithmetic ops with level= align against the broadcast aggregate."""
        def _check_op(opname):
            # Frame op with level= must equal the op against the
            # explicitly broadcast groupby-transform result.
            op = getattr(DataFrame, opname)
            month_sums = self.ymd.sum(level='month')
            result = op(self.ymd, month_sums, level='month')
            broadcasted = self.ymd.groupby(level='month').transform(np.sum)
            expected = op(self.ymd, broadcasted)
            tm.assert_frame_equal(result, expected)
            # Series
            op = getattr(Series, opname)
            result = op(self.ymd['A'], month_sums['A'], level='month')
            broadcasted = self.ymd['A'].groupby(level='month').transform(
                np.sum)
            expected = op(self.ymd['A'], broadcasted)
            expected.name = 'A'
            tm.assert_series_equal(result, expected)
        _check_op('sub')
        _check_op('add')
        _check_op('mul')
        _check_op('div')
    def test_pickle(self):
        """MultiIndexed frames survive a pickle round trip unchanged."""
        def _test_roundtrip(frame):
            unpickled = tm.round_trip_pickle(frame)
            tm.assert_frame_equal(frame, unpickled)
        _test_roundtrip(self.frame)
        _test_roundtrip(self.frame.T)
        _test_roundtrip(self.ymd)
        _test_roundtrip(self.ymd.T)
    def test_reindex(self):
        """Selecting a list of full index tuples via .loc/.ix reindexes rows."""
        expected = self.frame.iloc[[0, 3]]
        reindexed = self.frame.loc[[('foo', 'one'), ('bar', 'one')]]
        tm.assert_frame_equal(reindexed, expected)
        with catch_warnings(record=True):
            reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
            tm.assert_frame_equal(reindexed, expected)
    def test_reindex_preserve_levels(self):
        """Reindexing with an existing MultiIndex reuses that exact object."""
        new_index = self.ymd.index[::10]
        chunk = self.ymd.reindex(new_index)
        assert chunk.index is new_index
        chunk = self.ymd.loc[new_index]
        assert chunk.index is new_index
        with catch_warnings(record=True):
            chunk = self.ymd.ix[new_index]
        assert chunk.index is new_index
        ymdT = self.ymd.T
        chunk = ymdT.reindex(columns=new_index)
        assert chunk.columns is new_index
        chunk = ymdT.loc[:, new_index]
        assert chunk.columns is new_index
    def test_repr_to_string(self):
        """Smoke test: repr() and to_string() do not raise on MultiIndexed data."""
        repr(self.frame)
        repr(self.ymd)
        repr(self.frame.T)
        repr(self.ymd.T)
        buf = StringIO()
        self.frame.to_string(buf=buf)
        self.ymd.to_string(buf=buf)
        self.frame.T.to_string(buf=buf)
        self.ymd.T.to_string(buf=buf)
    def test_repr_name_coincide(self):
        """repr stays aligned when an index level name equals a label value."""
        index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
                                       names=['a', 'b', 'c'])
        df = DataFrame({'value': [0, 1]}, index=index)
        lines = repr(df).split('\n')
        assert lines[2].startswith('a 0 foo')
    def test_getitem_simple(self):
        """Tuple __getitem__ on MultiIndex columns; missing keys raise KeyError."""
        df = self.frame.T
        col = df['foo', 'one']
        tm.assert_almost_equal(col.values, df.values[:, 0])
        with pytest.raises(KeyError):
            df[('foo', 'four')]
        with pytest.raises(KeyError):
            df['foobar']
    def test_series_getitem(self):
        """Partial- and full-tuple indexing into a MultiIndexed Series."""
        s = self.ymd['A']
        result = s[2000, 3]
        # TODO(wesm): unused?
        # result2 = s.loc[2000, 3]
        expected = s.reindex(s.index[42:65])
        expected.index = expected.index.droplevel(0).droplevel(0)
        tm.assert_series_equal(result, expected)
        result = s[2000, 3, 10]
        expected = s[49]
        assert result == expected
        # fancy
        expected = s.reindex(s.index[49:51])
        result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
        tm.assert_series_equal(result, expected)
        with catch_warnings(record=True):
            result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
        tm.assert_series_equal(result, expected)
        # key error
        pytest.raises(KeyError, s.__getitem__, (2000, 3, 4))
    def test_series_getitem_corner(self):
        """Corner cases: out-of-bounds access and generator-based masks."""
        s = self.ymd['A']
        # don't segfault, GH #495
        # out of bounds access
        pytest.raises(IndexError, s.__getitem__, len(self.ymd))
        # generator
        result = s[(x > 0 for x in s)]
        expected = s[s > 0]
        tm.assert_series_equal(result, expected)
    def test_series_setitem(self):
        """Partial-tuple setitem assigns across the matching label block."""
        s = self.ymd['A']
        s[2000, 3] = np.nan
        assert isna(s.values[42:65]).all()
        assert notna(s.values[:42]).all()
        assert notna(s.values[65:]).all()
        s[2000, 3, 10] = np.nan
        assert isna(s[49])
    def test_series_slice_partial(self):
        # Intentionally empty placeholder; partial slicing is covered elsewhere.
        pass
    def test_frame_getitem_setitem_boolean(self):
        """Boolean-frame indexing and assignment on a MultiIndexed frame."""
        df = self.frame.T.copy()
        values = df.values
        result = df[df > 0]
        expected = df.where(df > 0)
        tm.assert_frame_equal(result, expected)
        df[df > 0] = 5
        values[values > 0] = 5
        tm.assert_almost_equal(df.values, values)
        df[df == 5] = 0
        values[values == 5] = 0
        tm.assert_almost_equal(df.values, values)
        # a df that needs alignment first
        df[df[:-1] < 0] = 2
        np.putmask(values[:-1], values[:-1] < 0, 2)
        tm.assert_almost_equal(df.values, values)
        # Non-boolean mask frames must be rejected.
        with tm.assert_raises_regex(TypeError, 'boolean values only'):
            df[df * 0] = 2
    def test_frame_getitem_setitem_slice(self):
        """Positional slicing via iloc matches plain-slice getitem/setitem."""
        # getitem
        result = self.frame.iloc[:4]
        expected = self.frame[:4]
        tm.assert_frame_equal(result, expected)
        # setitem
        cp = self.frame.copy()
        cp.iloc[:4] = 0
        assert (cp.values[:4] == 0).all()
        assert (cp.values[4:] != 0).all()
    def test_frame_getitem_setitem_multislice(self):
        """.loc slicing/assignment over all rows of a MultiIndexed frame."""
        levels = [['t1', 't2'], ['a', 'b', 'c']]
        labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
        midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
        df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
        result = df.loc[:, 'value']
        tm.assert_series_equal(df['value'], result)
        with catch_warnings(record=True):
            result = df.ix[:, 'value']
        tm.assert_series_equal(df['value'], result)
        result = df.loc[df.index[1:3], 'value']
        tm.assert_series_equal(df['value'][1:3], result)
        result = df.loc[:, :]
        tm.assert_frame_equal(df, result)
        result = df
        df.loc[:, 'value'] = 10
        result['value'] = 10
        tm.assert_frame_equal(df, result)
        df.loc[:, :] = 10
        tm.assert_frame_equal(df, result)
    def test_frame_getitem_multicolumn_empty_level(self):
        """Selecting a top level works even when a middle level label is ''."""
        f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
        f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
                     ['level3 item1', 'level3 item2']]
        result = f['level1 item1']
        expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
                             columns=['level3 item1'])
        tm.assert_frame_equal(result, expected)
    def test_frame_setitem_multi_column(self):
        """Assigning whole top-level column groups, incl. broadcasting (#1803)."""
        df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
                                              [0, 1, 0, 1]])
        cp = df.copy()
        cp['a'] = cp['b']
        tm.assert_frame_equal(cp['a'], cp['b'])
        # set with ndarray
        cp = df.copy()
        cp['a'] = cp['b'].values
        tm.assert_frame_equal(cp['a'], cp['b'])
        # ---------------------------------------
        # #1803
        columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
        df = DataFrame(index=[1, 3, 5], columns=columns)
        # Works, but adds a column instead of updating the two existing ones
        df['A'] = 0.0  # Doesn't work
        assert (df['A'].values == 0).all()
        # it broadcasts
        df['B', '1'] = [1, 2, 3]
        df['A'] = df['B', '1']
        sliced_a1 = df['A', '1']
        sliced_a2 = df['A', '2']
        sliced_b1 = df['B', '1']
        tm.assert_series_equal(sliced_a1, sliced_b1, check_names=False)
        tm.assert_series_equal(sliced_a2, sliced_b1, check_names=False)
        assert sliced_a1.name == ('A', '1')
        assert sliced_a2.name == ('A', '2')
        assert sliced_b1.name == ('B', '1')
    def test_getitem_tuple_plus_slice(self):
        """GH #671: .loc[(k1, k2), :] equals the tuple lookup and xs."""
        df = DataFrame({'a': lrange(10),
                        'b': lrange(10),
                        'c': np.random.randn(10),
                        'd': np.random.randn(10)})
        idf = df.set_index(['a', 'b'])
        result = idf.loc[(0, 0), :]
        expected = idf.loc[0, 0]
        expected2 = idf.xs((0, 0))
        with catch_warnings(record=True):
            expected3 = idf.ix[0, 0]
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result, expected2)
        tm.assert_series_equal(result, expected3)
    def test_getitem_setitem_tuple_plus_columns(self):
        """GH #1013: .loc with a full index tuple plus a column list."""
        df = self.ymd[:5]
        result = df.loc[(2000, 1, 6), ['A', 'B', 'C']]
        expected = df.loc[2000, 1, 6][['A', 'B', 'C']]
        tm.assert_series_equal(result, expected)
    def test_xs(self):
        """xs with a full tuple, and level-based xs preserving NaN labels (GH 6574)."""
        xs = self.frame.xs(('bar', 'two'))
        xs2 = self.frame.loc[('bar', 'two')]
        tm.assert_series_equal(xs, xs2)
        tm.assert_almost_equal(xs.values, self.frame.values[4])
        # GH 6574
        # missing values in the returned index should be preserved
        acc = [
            ('a', 'abcde', 1),
            ('b', 'bbcde', 2),
            ('y', 'yzcde', 25),
            ('z', 'xbcde', 24),
            ('z', None, 26),
            ('z', 'zbcde', 25),
            ('z', 'ybcde', 26),
        ]
        df = DataFrame(acc,
                       columns=['a1', 'a2', 'cnt']).set_index(['a1', 'a2'])
        expected = DataFrame({'cnt': [24, 26, 25, 26]}, index=Index(
            ['xbcde', np.nan, 'zbcde', 'ybcde'], name='a2'))
        result = df.xs('z', level='a1')
        tm.assert_frame_equal(result, expected)
    def test_xs_partial(self):
        """xs with a partial key (single label or tuple prefix) selects a slab."""
        result = self.frame.xs('foo')
        result2 = self.frame.loc['foo']
        expected = self.frame.T['foo'].T
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result, result2)
        result = self.ymd.xs((2000, 4))
        expected = self.ymd.loc[2000, 4]
        tm.assert_frame_equal(result, expected)
        # ex from #1796
        index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
                           labels=[[0, 0, 0, 0, 1, 1, 1, 1],
                                   [0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1,
                                                              0, 1]])
        df = DataFrame(np.random.randn(8, 4), index=index,
                       columns=list('abcd'))
        result = df.xs(['foo', 'one'])
        expected = df.loc['foo', 'one']
        tm.assert_frame_equal(result, expected)
    def test_xs_level(self):
        """xs(level=...) drops the selected level; the result is a copy."""
        result = self.frame.xs('two', level='second')
        expected = self.frame[self.frame.index.get_level_values(1) == 'two']
        expected.index = expected.index.droplevel(1)
        tm.assert_frame_equal(result, expected)
        index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'), (
            'p', 'q', 'r')])
        df = DataFrame(np.random.randn(3, 5), index=index)
        result = df.xs('c', level=2)
        expected = df[1:2]
        expected.index = expected.index.droplevel(2)
        tm.assert_frame_equal(result, expected)
        # this is a copy in 0.14
        result = self.frame.xs('two', level='second')
        # setting this will give a SettingWithCopyError
        # as we are trying to write a view
        def f(x):
            x[:] = 10
        pytest.raises(com.SettingWithCopyError, f, result)
    def test_xs_level_multiple(self):
        """xs with a tuple of keys across several named levels (and GH2107)."""
        from pandas import read_table
        text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        df = read_table(StringIO(text), sep=r'\s+', engine='python')
        result = df.xs(('a', 4), level=['one', 'four'])
        expected = df.xs('a').xs(4, level='four')
        tm.assert_frame_equal(result, expected)
        # this is a copy in 0.14
        result = df.xs(('a', 4), level=['one', 'four'])
        # setting this will give a SettingWithCopyError
        # as we are trying to write a view
        def f(x):
            x[:] = 10
        pytest.raises(com.SettingWithCopyError, f, result)
        # GH2107
        dates = lrange(20111201, 20111205)
        ids = 'abcde'
        idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
        idx.names = ['date', 'secid']
        df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
        rs = df.xs(20111201, level='date')
        xp = df.loc[20111201, :]
        tm.assert_frame_equal(rs, xp)
    def test_xs_level0(self):
        """xs(level=0) equals the plain partial-key xs."""
        from pandas import read_table
        text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        df = read_table(StringIO(text), sep=r'\s+', engine='python')
        result = df.xs('a', level=0)
        expected = df.xs('a')
        assert len(result) == 2
        tm.assert_frame_equal(result, expected)
    def test_xs_level_series(self):
        """Series cross-sections via s[:, key] and partial tuples."""
        s = self.frame['A']
        result = s[:, 'two']
        expected = self.frame.xs('two', level=1)['A']
        tm.assert_series_equal(result, expected)
        s = self.ymd['A']
        result = s[2000, 5]
        expected = self.ymd.loc[2000, 5]['A']
        tm.assert_series_equal(result, expected)
        # not implementing this for now
        pytest.raises(TypeError, s.__getitem__, (2000, slice(3, 4)))
        # result = s[2000, 3:4]
        # lv =s.index.get_level_values(1)
        # expected = s[(lv == 3) | (lv == 4)]
        # expected.index = expected.index.droplevel(0)
        # tm.assert_series_equal(result, expected)
        # can do this though
    def test_get_loc_single_level(self):
        """Every label of a single-level MultiIndex is directly indexable."""
        s = Series(np.random.randn(len(self.single_level)),
                   index=self.single_level)
        for k in self.single_level.values:
            s[k]
    def test_getitem_toplevel(self):
        """Selecting a top-level column group drops the selected level."""
        df = self.frame.T
        result = df['foo']
        expected = df.reindex(columns=df.columns[:3])
        expected.columns = expected.columns.droplevel(0)
        tm.assert_frame_equal(result, expected)
        result = df['bar']
        result2 = df.loc[:, 'bar']
        expected = df.reindex(columns=df.columns[3:5])
        expected.columns = expected.columns.droplevel(0)
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result, result2)
    def test_getitem_setitem_slice_integers(self):
        """Label-based integer slices on an integer-leveled MultiIndex."""
        index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
                           labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
        frame = DataFrame(np.random.randn(len(index), 4), index=index,
                          columns=['a', 'b', 'c', 'd'])
        res = frame.loc[1:2]
        exp = frame.reindex(frame.index[2:])
        tm.assert_frame_equal(res, exp)
        frame.loc[1:2] = 7
        assert (frame.loc[1:2] == 7).values.all()
        series = Series(np.random.randn(len(index)), index=index)
        res = series.loc[1:2]
        exp = series.reindex(series.index[2:])
        tm.assert_series_equal(res, exp)
        series.loc[1:2] = 7
        assert (series.loc[1:2] == 7).values.all()
    def test_getitem_int(self):
        """Integer label lookup via .loc on integer levels; iloc still positional."""
        levels = [[0, 1], [0, 1, 2]]
        labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
        index = MultiIndex(levels=levels, labels=labels)
        frame = DataFrame(np.random.randn(6, 2), index=index)
        result = frame.loc[1]
        expected = frame[-3:]
        expected.index = expected.index.droplevel(0)
        tm.assert_frame_equal(result, expected)
        # raises exception
        pytest.raises(KeyError, frame.loc.__getitem__, 3)
        # however this will work
        result = self.frame.iloc[2]
        expected = self.frame.xs(self.frame.index[2])
        tm.assert_series_equal(result, expected)
    def test_getitem_partial(self):
        """Partial-tuple column selection drops the consumed levels."""
        ymd = self.ymd.T
        result = ymd[2000, 2]
        expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
        expected.columns = expected.columns.droplevel(0).droplevel(0)
        tm.assert_frame_equal(result, expected)
    def test_setitem_change_dtype(self):
        """Reassigning a tuple column with a different dtype round-trips."""
        dft = self.frame.T
        s = dft['foo', 'two']
        dft['foo', 'two'] = s > s.median()
        tm.assert_series_equal(dft['foo', 'two'], s > s.median())
        # assert isinstance(dft._data.blocks[1].items, MultiIndex)
        reindexed = dft.reindex(columns=[('foo', 'two')])
        tm.assert_series_equal(reindexed['foo', 'two'], s > s.median())
    def test_frame_setitem_ix(self):
        """Scalar assignment via .loc/.ix with a full index tuple."""
        self.frame.loc[('bar', 'two'), 'B'] = 5
        assert self.frame.loc[('bar', 'two'), 'B'] == 5
        # with integer labels
        df = self.frame.copy()
        df.columns = lrange(3)
        df.loc[('bar', 'two'), 1] = 7
        assert df.loc[('bar', 'two'), 1] == 7
        with catch_warnings(record=True):
            df = self.frame.copy()
            df.columns = lrange(3)
            df.ix[('bar', 'two'), 1] = 7
        assert df.loc[('bar', 'two'), 1] == 7
    def test_fancy_slice_partial(self):
        """Label slices over the first level(s) of a sorted MultiIndex."""
        result = self.frame.loc['bar':'baz']
        expected = self.frame[3:7]
        tm.assert_frame_equal(result, expected)
        result = self.ymd.loc[(2000, 2):(2000, 4)]
        lev = self.ymd.index.labels[1]
        expected = self.ymd[(lev >= 1) & (lev <= 3)]
        tm.assert_frame_equal(result, expected)
    def test_getitem_partial_column_select(self):
        """Partial row tuple combined with column selection via loc/ix."""
        idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
                         levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
        df = DataFrame(np.random.rand(3, 2), index=idx)
        result = df.loc[('a', 'y'), :]
        expected = df.loc[('a', 'y')]
        tm.assert_frame_equal(result, expected)
        result = df.loc[('a', 'y'), [1, 0]]
        expected = df.loc[('a', 'y')][[1, 0]]
        tm.assert_frame_equal(result, expected)
        with catch_warnings(record=True):
            result = df.ix[('a', 'y'), [1, 0]]
        tm.assert_frame_equal(result, expected)
        pytest.raises(KeyError, df.loc.__getitem__,
                      (('a', 'foo'), slice(None, None)))
    def test_delevel_infer_dtype(self):
        """reset_index infers int/float dtypes for the materialized levels."""
        tuples = [tuple
                  for tuple in cart_product(
                      ['foo', 'bar'], [10, 20], [1.0, 1.1])]
        index = MultiIndex.from_tuples(tuples, names=['prm0', 'prm1', 'prm2'])
        df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
                       index=index)
        deleveled = df.reset_index()
        assert is_integer_dtype(deleveled['prm1'])
        assert is_float_dtype(deleveled['prm2'])
    def test_reset_index_with_drop(self):
        """reset_index(drop=True) discards levels instead of adding columns."""
        deleveled = self.ymd.reset_index(drop=True)
        assert len(deleveled.columns) == len(self.ymd.columns)
        deleveled = self.series.reset_index()
        assert isinstance(deleveled, DataFrame)
        assert len(deleveled.columns) == len(self.series.index.levels) + 1
        deleveled = self.series.reset_index(drop=True)
        assert isinstance(deleveled, Series)
    def test_count_level(self):
        """count(level=...) matches the groupby-count on each level/axis."""
        def _check_counts(frame, axis=0):
            index = frame._get_axis(axis)
            for i in range(index.nlevels):
                result = frame.count(axis=axis, level=i)
                expected = frame.groupby(axis=axis, level=i).count()
                expected = expected.reindex_like(result).astype('i8')
                tm.assert_frame_equal(result, expected)
        # NOTE: mutates the shared fixtures in place before counting.
        self.frame.iloc[1, [1, 2]] = np.nan
        self.frame.iloc[7, [0, 1]] = np.nan
        self.ymd.iloc[1, [1, 2]] = np.nan
        self.ymd.iloc[7, [0, 1]] = np.nan
        _check_counts(self.frame)
        _check_counts(self.ymd)
        _check_counts(self.frame.T, axis=1)
        _check_counts(self.ymd.T, axis=1)
        # can't call with level on regular DataFrame
        df = tm.makeTimeDataFrame()
        tm.assert_raises_regex(
            TypeError, 'hierarchical', df.count, level=0)
        self.frame['D'] = 'foo'
        result = self.frame.count(level=0, numeric_only=True)
        tm.assert_index_equal(result.columns, Index(list('ABC'), name='exp'))
    def test_count_level_series(self):
        """Series.count(level=...) matches groupby-count, filling absent labels."""
        index = MultiIndex(levels=[['foo', 'bar', 'baz'], ['one', 'two',
                                                           'three', 'four']],
                           labels=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]])
        s = Series(np.random.randn(len(index)), index=index)
        result = s.count(level=0)
        expected = s.groupby(level=0).count()
        tm.assert_series_equal(
            result.astype('f8'), expected.reindex(result.index).fillna(0))
        result = s.count(level=1)
        expected = s.groupby(level=1).count()
        tm.assert_series_equal(
            result.astype('f8'), expected.reindex(result.index).fillna(0))
    def test_count_level_corner(self):
        """count(level=0) on empty objects yields all-zero results."""
        s = self.frame['A'][:0]
        result = s.count(level=0)
        expected = Series(0, index=s.index.levels[0], name='A')
        tm.assert_series_equal(result, expected)
        df = self.frame[:0]
        result = df.count(level=0)
        expected = DataFrame({}, index=s.index.levels[0],
                             columns=df.columns).fillna(0).astype(np.int64)
        tm.assert_frame_equal(result, expected)
    def test_get_level_number_out_of_bounds(self):
        """_get_level_number rejects levels outside [-nlevels, nlevels)."""
        with tm.assert_raises_regex(IndexError, "Too many levels"):
            self.frame.index._get_level_number(2)
        with tm.assert_raises_regex(IndexError,
                                    "not a valid level number"):
            self.frame.index._get_level_number(-3)
    def test_unstack(self):
        """Smoke test: unstack works for float, int and int32 frames."""
        # just check that it works for now
        unstacked = self.ymd.unstack()
        unstacked.unstack()
        # test that ints work
        self.ymd.astype(int).unstack()
        # test that int32 work
        self.ymd.astype(np.int32).unstack()
    def test_unstack_multiple_no_empty_columns(self):
        """Unstacking multiple levels drops all-NaN columns it would create."""
        index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0), (
            1, 'baz', 1), (1, 'qux', 1)])
        s = Series(np.random.randn(4), index=index)
        unstacked = s.unstack([1, 2])
        expected = unstacked.dropna(axis=1, how='all')
        tm.assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert not left.index.is_unique
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']], labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.repeat(
[1, 0, 1], [3, 6, 3]), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
    def test_unstack_odd_failure(self):
        """GH #2100: unstack/stack round trip on an irregular CSV fixture."""
        data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
        df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
        # it works, #2100
        result = df.unstack(2)
        recons = result.stack()
        tm.assert_frame_equal(recons, df)
    def test_stack_mixed_dtype(self):
        """Stacking a frame with mixed object/float columns keeps dtypes sane."""
        df = self.frame.T
        df['foo', 'four'] = 'foo'
        df = df.sort_index(level=1, axis=1)
        stacked = df.stack()
        result = df['foo'].stack().sort_index()
        tm.assert_series_equal(stacked['foo'], result, check_names=False)
        assert result.name is None
        assert stacked['bar'].dtype == np.float_
    def test_unstack_bug(self):
        """unstack/stack round trip on a groupby-apply result."""
        df = DataFrame({'state': ['naive', 'naive', 'naive', 'activ', 'activ',
                                  'activ'],
                        'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
                        'barcode': [1, 2, 3, 4, 1, 3],
                        'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
                        'extra': np.arange(6.)})
        result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
        unstacked = result.unstack()
        restacked = unstacked.stack()
        tm.assert_series_equal(
            restacked, result.reindex(restacked.index).astype(float))
    def test_stack_unstack_preserve_names(self):
        """Level names survive both unstack and the stack round trip."""
        unstacked = self.frame.unstack()
        assert unstacked.index.name == 'first'
        assert unstacked.columns.names == ['exp', 'second']
        restacked = unstacked.stack()
        assert restacked.index.names == self.frame.index.names
    def test_unstack_level_name(self):
        """unstack accepts a level name as well as a level number."""
        result = self.frame.unstack('second')
        expected = self.frame.unstack(level=1)
        tm.assert_frame_equal(result, expected)
    def test_stack_level_name(self):
        """stack accepts a level name for both column and index levels."""
        unstacked = self.frame.unstack('second')
        result = unstacked.stack('exp')
        expected = self.frame.unstack().stack(0)
        tm.assert_frame_equal(result, expected)
        result = self.frame.stack('exp')
        expected = self.frame.stack()
        tm.assert_series_equal(result, expected)
    def test_stack_unstack_multiple(self):
        """Multi-level stack/unstack by name and by number (incl. GH #451)."""
        unstacked = self.ymd.unstack(['year', 'month'])
        expected = self.ymd.unstack('year').unstack('month')
        tm.assert_frame_equal(unstacked, expected)
        assert unstacked.columns.names == expected.columns.names
        # series
        s = self.ymd['A']
        s_unstacked = s.unstack(['year', 'month'])
        tm.assert_frame_equal(s_unstacked, expected['A'])
        restacked = unstacked.stack(['year', 'month'])
        restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
        restacked = restacked.sort_index(level=0)
        tm.assert_frame_equal(restacked, self.ymd)
        assert restacked.index.names == self.ymd.index.names
        # GH #451
        unstacked = self.ymd.unstack([1, 2])
        expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
        tm.assert_frame_equal(unstacked, expected)
        unstacked = self.ymd.unstack([2, 1])
        expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
        tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
    def test_stack_names_and_numbers(self):
        """Mixing level names and numbers in one stack call is rejected."""
        unstacked = self.ymd.unstack(['year', 'month'])
        # Can't use mixture of names and numbers to stack
        with tm.assert_raises_regex(ValueError, "level should contain"):
            unstacked.stack([0, 'month'])
    def test_stack_multiple_out_of_bounds(self):
        """Out-of-range level numbers in stack raise IndexError."""
        # nlevels == 3
        unstacked = self.ymd.unstack(['year', 'month'])
        with tm.assert_raises_regex(IndexError, "Too many levels"):
            unstacked.stack([2, 3])
        with tm.assert_raises_regex(IndexError,
                                    "not a valid level number"):
            unstacked.stack([-4, -3])
def test_unstack_period_series(self):
    """unstack of a Series whose MultiIndex carries PeriodIndex levels
    preserves the Period dtype in the result axes (GH 4342)."""
    # Period level paired with a plain string level
    idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
                           '2013-03', '2013-03'], freq='M', name='period')
    idx2 = Index(['A', 'B'] * 3, name='str')
    value = [1, 2, 3, 4, 5, 6]

    idx = MultiIndex.from_arrays([idx1, idx2])
    s = Series(value, index=idx)

    result1 = s.unstack()
    result2 = s.unstack(level=1)
    result3 = s.unstack(level=0)

    e_idx = pd.PeriodIndex(
        ['2013-01', '2013-02', '2013-03'], freq='M', name='period')
    expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
                         columns=['A', 'B'])
    expected.columns.name = 'str'

    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, expected)
    tm.assert_frame_equal(result3, expected.T)

    # two Period levels; unobserved combinations become NaN
    idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
                           '2013-03', '2013-03'], freq='M', name='period1')
    idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
                           '2013-08', '2013-07'], freq='M', name='period2')
    idx = MultiIndex.from_arrays([idx1, idx2])
    s = Series(value, index=idx)

    result1 = s.unstack()
    result2 = s.unstack(level=1)
    result3 = s.unstack(level=0)

    e_idx = pd.PeriodIndex(
        ['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
    e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
                             '2013-11', '2013-12'],
                            freq='M', name='period2')
    expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
                          [np.nan, np.nan, 4, 3, np.nan, np.nan],
                          [6, 5, np.nan, np.nan, np.nan, np.nan]],
                         index=e_idx, columns=e_cols)

    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, expected)
    tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
    """unstack of a DataFrame with two PeriodIndex levels keeps Period
    dtype in both the result index and column levels (GH 4342)."""
    idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02',
                           '2014-01', '2014-01'],
                          freq='M', name='period1')
    idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10',
                           '2013-10', '2014-02'],
                          freq='M', name='period2')
    value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
    idx = MultiIndex.from_arrays([idx1, idx2])
    df = DataFrame(value, index=idx)

    result1 = df.unstack()
    result2 = df.unstack(level=1)
    result3 = df.unstack(level=0)

    e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
    e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
                          '2013-12', '2014-02'], freq='M', name='period2')
    e_cols = MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
    expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
                         index=e_1, columns=e_cols)

    tm.assert_frame_equal(result1, expected)
    tm.assert_frame_equal(result2, expected)

    # unstacking level 0 moves period1 into the columns instead
    e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
                          '2014-02'], freq='M', name='period1')
    e_2 = pd.PeriodIndex(
        ['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
    e_cols = MultiIndex.from_arrays(['A A B B'.split(), e_1])
    expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
                         index=e_2, columns=e_cols)

    tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
    """stack after resample when some level values are unobserved in the
    data must not drop or misalign columns (GH #3170)."""
    id_col = ([1] * 3) + ([2] * 3)
    name = (['a'] * 3) + (['b'] * 3)
    date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
    var1 = np.random.randint(0, 100, 6)
    df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))

    multi = df.set_index(['DATE', 'ID'])
    multi.columns.name = 'Params'
    unst = multi.unstack('ID')
    down = unst.resample('W-THU').mean()

    rs = down.stack('ID')
    # NAME is non-numeric and disappears under mean(); compare against
    # resampling only the numeric VAR1 block
    xp = unst.loc[:, ['VAR1']].resample('W-THU').mean().stack('ID')
    xp.columns.name = 'Params'
    tm.assert_frame_equal(rs, xp)
def test_stack_dropna(self):
    """stack(dropna=...) keeps or removes all-NaN rows (GH #3997)."""
    df = DataFrame({'A': ['a1', 'a2'], 'B': ['b1', 'b2'], 'C': [1, 1]})
    df = df.set_index(['A', 'B'])

    # dropna=False retains the NaN rows produced by unstacking
    stacked = df.unstack().stack(dropna=False)
    assert len(stacked) > len(stacked.dropna())

    # dropna=True yields only fully-populated rows
    stacked = df.unstack().stack(dropna=True)
    tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
    """Smoke test: unstacking two named levels of a 3-level index with
    hierarchical columns must not raise."""
    df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
                          [0, 0, 1, 1, 0, 0, 1, 1],
                          [0, 1, 0, 1, 0, 1, 0, 1]],
                   columns=[[0, 0, 1, 1], [0, 1, 0, 1]])

    df.index.names = ['a', 'b', 'c']
    df.columns.names = ['d', 'e']

    # it works!
    df.unstack(['b', 'c'])
def test_groupby_transform(self):
    """groupby.apply of an elementwise function matches transform once
    realigned to the transform's index."""
    s = self.frame['A']
    grouper = s.index.get_level_values(0)

    grouped = s.groupby(grouper)

    applied = grouped.apply(lambda x: x * 2)
    expected = grouped.transform(lambda x: x * 2)
    result = applied.reindex(expected.index)
    tm.assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
    """unstack must not allocate the full cartesian key space when the
    observed keys are sparse (memory blow-up in #2278)."""
    # Generate Long File & Test Pivot
    NUM_ROWS = 1000

    df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
                    'B': np.random.randint(300, size=NUM_ROWS),
                    'C': np.random.randint(-7, 7, size=NUM_ROWS),
                    'D': np.random.randint(-19, 19, size=NUM_ROWS),
                    'E': np.random.randint(3000, size=NUM_ROWS),
                    'F': np.random.randn(NUM_ROWS)})

    idf = df.set_index(['A', 'B', 'C', 'D', 'E'])

    # it works! is sufficient
    idf.unstack('E')
def test_unstack_unobserved_keys(self):
    """Level values never observed in the data still appear as columns
    after unstack, and stack round-trips (related to #2278)."""
    # level 1 has values {0, 1, 2, 3} but only 0 and 2 are used
    levels = [[0, 1], [0, 1, 2, 3]]
    labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
    index = MultiIndex(levels, labels)

    df = DataFrame(np.random.randn(4, 2), index=index)

    result = df.unstack()
    assert len(result.columns) == 4

    recons = result.stack()
    tm.assert_frame_equal(recons, df)
def test_stack_order_with_unsorted_levels(self):
    """stack must pair values with the right labels even when column
    levels are not sorted (GH 16323)."""

    def manual_compare_stacked(df, df_stacked, lev0, lev1):
        # every original cell must be findable in the stacked frame
        assert all(df.loc[row, col] ==
                   df_stacked.loc[(row, col[lev0]), col[lev1]]
                   for row in df.index for col in df.columns)

    # deep check for 1-row case: every permutation of level values,
    # stacking each of the two column levels
    for width in [2, 3]:
        levels_poss = itertools.product(
            itertools.permutations([0, 1, 2], width),
            repeat=2)

        for levels in levels_poss:
            columns = MultiIndex(levels=levels,
                                 labels=[[0, 0, 1, 1],
                                         [0, 1, 0, 1]])
            df = DataFrame(columns=columns, data=[range(4)])
            for stack_lev in range(2):
                df_stacked = df.stack(stack_lev)
                manual_compare_stacked(df, df_stacked,
                                       stack_lev, 1 - stack_lev)

    # check multi-row case with deliberately unsorted level values
    mi = MultiIndex(levels=[["A", "C", "B"], ["B", "A", "C"]],
                    labels=[np.repeat(range(3), 3), np.tile(range(3), 3)])
    df = DataFrame(columns=mi, index=range(5),
                   data=np.arange(5 * len(mi)).reshape(5, -1))
    manual_compare_stacked(df, df.stack(0), 0, 1)
def test_groupby_corner(self):
    """groupby by level name works on a single-row 3-level index."""
    midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
                      labels=[[0], [0], [0]],
                      names=['one', 'two', 'three'])
    df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
                   index=midx)
    # should work
    df.groupby(level='three')
def test_groupby_level_no_obs(self):
    """Grouping over columns drops level-0 groups with no observed
    columns rather than emitting empty groups (#1697)."""
    midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'), (
        'f2', 's1'), ('f2', 's2'), ('f3', 's1'), ('f3', 's2')])
    df = DataFrame(
        [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
    # keep only the f2/f3 columns, so 'f1' has no observations
    df1 = df.loc(axis=1)[df.columns.map(
        lambda u: u[0] in ['f2', 'f3'])]

    grouped = df1.groupby(axis=1, level=0)
    result = grouped.sum()
    assert (result.columns == ['f2', 'f3']).all()
def test_join(self):
    """Outer join of two disjoint column slices reconstructs the frame,
    with NaN where either side had no rows."""
    a = self.frame.loc[self.frame.index[:5], ['A']]
    b = self.frame.loc[self.frame.index[2:], ['B', 'C']]

    joined = a.join(b, how='outer').reindex(self.frame.index)
    expected = self.frame.copy()
    expected.values[np.isnan(joined.values)] = np.nan

    assert not np.isnan(joined.values).all()

    # TODO what should join do with names ?
    tm.assert_frame_equal(joined, expected, check_names=False)
def test_swaplevel(self):
    """swaplevel defaults, positional args, and level names are all
    equivalent; swapping twice restores the original index."""
    swapped = self.frame['A'].swaplevel()
    swapped2 = self.frame['A'].swaplevel(0)
    swapped3 = self.frame['A'].swaplevel(0, 1)
    swapped4 = self.frame['A'].swaplevel('first', 'second')
    assert not swapped.index.equals(self.frame.index)
    tm.assert_series_equal(swapped, swapped2)
    tm.assert_series_equal(swapped, swapped3)
    tm.assert_series_equal(swapped, swapped4)

    # swapping back is the identity
    back = swapped.swaplevel()
    back2 = swapped.swaplevel(0)
    back3 = swapped.swaplevel(0, 1)
    back4 = swapped.swaplevel('second', 'first')
    assert back.index.equals(self.frame.index)
    tm.assert_series_equal(back, back2)
    tm.assert_series_equal(back, back3)
    tm.assert_series_equal(back, back4)

    # axis=1 on the transpose equals swapping on the index then transposing
    ft = self.frame.T
    swapped = ft.swaplevel('first', 'second', axis=1)
    exp = self.frame.swaplevel('first', 'second').T
    tm.assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
    """Panel.swaplevel on the major axis: default, one-arg, and two-arg
    forms agree.  Panel is deprecated, hence the warning guard."""
    with catch_warnings(record=True):
        panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2})
        expected = panel.copy()
        expected.major_axis = expected.major_axis.swaplevel(0, 1)

        for result in (panel.swaplevel(axis='major'),
                       panel.swaplevel(0, axis='major'),
                       panel.swaplevel(0, 1, axis='major')):
            tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
    """reorder_levels is equivalent to a chain of swaplevel calls, on
    frames, series, and along axis=1; errors on bad input."""
    result = self.ymd.reorder_levels(['month', 'day', 'year'])
    expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
    tm.assert_frame_equal(result, expected)

    result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
    expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
    tm.assert_series_equal(result, expected)

    result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
    expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
    tm.assert_frame_equal(result, expected)

    # axis=1 of the original frame is not hierarchical
    with tm.assert_raises_regex(TypeError, 'hierarchical axis'):
        self.ymd.reorder_levels([1, 2], axis=1)

    # more level numbers than levels
    with tm.assert_raises_regex(IndexError, 'Too many levels'):
        self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
    """Assigning with a full key tuple inserts a new column and keeps
    the columns a MultiIndex."""
    df = self.ymd[:5].T
    df[2000, 1, 10] = df[2000, 1, 7]
    assert isinstance(df.columns, MultiIndex)
    assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
    """Arithmetic between series with partially-overlapping MultiIndexes
    aligns on the index union, for both monotonic and reversed order."""
    x = Series(data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), (
        "A", 2), ("B", 3)]))

    y = Series(data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), (
        "Z", 2), ("B", 3)]))

    res = x - y
    exp_index = x.index.union(y.index)
    exp = x.reindex(exp_index) - y.reindex(exp_index)
    tm.assert_series_equal(res, exp)

    # hit non-monotonic code path
    res = x[::-1] - y[::-1]
    exp_index = x.index.union(y.index)
    exp = x.reindex(exp_index) - y.reindex(exp_index)
    tm.assert_series_equal(res, exp)
def test_frame_getitem_view(self):
    """Writing through df['foo'].values mutates the parent while the
    frame is a single homogeneous block; once a mixed-dtype column is
    added, chained assignment raises and does not propagate.

    Fix: narrowed the bare ``except:`` to ``except Exception:`` — a bare
    except would also swallow KeyboardInterrupt/SystemExit.
    """
    df = self.frame.T.copy()

    # this works because we are modifying the underlying array
    # really a no-no
    df['foo'].values[:] = 0
    assert (df['foo'].values == 0).all()

    # but not if it's mixed-type
    df['foo', 'four'] = 'foo'
    df = df.sort_index(level=0, axis=1)

    # this will work, but will raise/warn as its chained assignment
    def f():
        df['foo']['one'] = 2
        return df

    pytest.raises(com.SettingWithCopyError, f)

    try:
        df = f()
    except Exception:
        # the chained assignment is expected to raise; either way the
        # write must not have reached the underlying data
        pass
    assert (df['foo', 'one'] == 0).all()
def test_count(self):
    """count(level=name) matches count by level number for frames and
    series; unknown level names raise KeyError."""
    frame = self.frame.copy()
    frame.index.names = ['a', 'b']

    result = frame.count(level='b')
    expect = self.frame.count(level=1)
    tm.assert_frame_equal(result, expect, check_names=False)

    result = frame.count(level='a')
    expect = self.frame.count(level=0)
    tm.assert_frame_equal(result, expect, check_names=False)

    series = self.series.copy()
    series.index.names = ['a', 'b']

    result = series.count(level='b')
    expect = self.series.count(level=1)
    tm.assert_series_equal(result, expect, check_names=False)
    assert result.index.name == 'b'

    result = series.count(level='a')
    expect = self.series.count(level=0)
    tm.assert_series_equal(result, expect, check_names=False)
    assert result.index.name == 'a'

    # nonexistent level names must raise
    pytest.raises(KeyError, series.count, 'x')
    pytest.raises(KeyError, frame.count, level='x')
# Reduction method names exercised by the grouped-aggregation tests below.
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
                 'mad', 'std', 'var', 'sem']
@pytest.mark.parametrize('sort', [True, False])
def test_series_group_min_max(self, sort):
    """Every aggregation via groupby(level=...).agg matches the direct
    Series reduction with level=, for both index levels, with and
    without skipna, and both sort settings (GH 17537)."""
    for op, level, skipna in cart_product(self.AGG_FUNCTIONS, lrange(2),
                                          [False, True]):
        grouped = self.series.groupby(level=level, sort=sort)
        # the reduction under test, applied group-wise
        aggf = lambda x: getattr(x, op)(skipna=skipna)
        # skipna=True
        leftside = grouped.agg(aggf)
        rightside = getattr(self.series, op)(level=level, skipna=skipna)
        if sort:
            rightside = rightside.sort_index(level=level)
        tm.assert_series_equal(leftside, rightside)
@pytest.mark.parametrize('sort', [True, False])
def test_frame_group_ops(self, sort):
    """Frame-level reductions with level= match the equivalent
    groupby(level=...).agg over both axes, all AGG_FUNCTIONS, and both
    skipna settings, including with NaNs present (GH 17537)."""
    self.frame.iloc[1, [1, 2]] = np.nan
    self.frame.iloc[7, [0, 1]] = np.nan

    for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
                                                lrange(2), lrange(2),
                                                [False, True]):
        # axis=1 cases are driven through the transpose
        if axis == 0:
            frame = self.frame
        else:
            frame = self.frame.T

        grouped = frame.groupby(level=level, axis=axis, sort=sort)

        pieces = []

        def aggf(x):
            pieces.append(x)  # collect groups for the axis check below
            return getattr(x, op)(skipna=skipna, axis=axis)

        leftside = grouped.agg(aggf)
        rightside = getattr(frame, op)(level=level, axis=axis,
                                       skipna=skipna)
        if sort:
            rightside = rightside.sort_index(level=level, axis=axis)
            frame = frame.sort_index(level=level, axis=axis)

        # for good measure, groupby detail
        level_index = frame._get_axis(axis).levels[level]

        tm.assert_index_equal(leftside._get_axis(axis), level_index)
        tm.assert_index_equal(rightside._get_axis(axis), level_index)

        tm.assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
    """sum(level=0) on a single-element MultiIndexed Series collapses to
    a flat index of the level values."""
    obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))

    result = obj.sum(level=0)
    expected = Series([10.0], index=[2])
    tm.assert_series_equal(result, expected)
def test_frame_any_all_group(self):
    """any/all with level=0 reduce within each outer index group."""
    df = DataFrame(
        {'data': [False, False, True, False, True, False, True]},
        index=[
            ['one', 'one', 'two', 'one', 'two', 'two', 'two'],
            [0, 1, 0, 2, 1, 2, 3]])

    result = df.any(level=0)
    ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
    tm.assert_frame_equal(result, ex)

    result = df.all(level=0)
    ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
    tm.assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
    """std/var with level= forwards the ddof argument, matching an
    explicit groupby aggregation."""
    index = MultiIndex.from_arrays([np.arange(5).repeat(10), np.tile(
        np.arange(10), 5)])
    df = DataFrame(np.random.randn(len(index), 5), index=index)

    for meth in ['var', 'std']:
        ddof = 4
        alt = lambda x: getattr(x, meth)(ddof=ddof)

        result = getattr(df[0], meth)(level=0, ddof=ddof)
        expected = df[0].groupby(level=0).agg(alt)
        tm.assert_series_equal(result, expected)

        result = getattr(df, meth)(level=0, ddof=ddof)
        expected = df.groupby(level=0).agg(alt)
        tm.assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
    """sum(level=[...]) with multiple level names matches the explicit
    multi-level groupby, for frames and series."""
    result = self.ymd.sum(level=['year', 'month'])
    expected = self.ymd.groupby(level=['year', 'month']).sum()
    tm.assert_frame_equal(result, expected)

    result = self.ymd['A'].sum(level=['year', 'month'])
    expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
    tm.assert_series_equal(result, expected)
def test_groupby_multilevel(self):
    """groupby(level=[0, 1]) equals grouping by the extracted level
    values and by the level names."""
    result = self.ymd.groupby(level=[0, 1]).mean()

    k1 = self.ymd.index.get_level_values(0)
    k2 = self.ymd.index.get_level_values(1)

    expected = self.ymd.groupby([k1, k2]).mean()

    # TODO groupby with level_values drops names
    tm.assert_frame_equal(result, expected, check_names=False)
    assert result.index.names == self.ymd.index.names[:2]

    result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
    tm.assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
    """Placeholder — no assertions yet; kept so the intended coverage
    is visible in the test listing."""
    pass
def test_multilevel_consolidate(self):
    """Smoke test: _consolidate works after adding a totals column to a
    frame with MultiIndex rows and columns."""
    index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), (
        'bar', 'one'), ('bar', 'two')])
    df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
    df['Totals', ''] = df.sum(1)
    df = df._consolidate()
def test_ix_preserve_names(self):
    """Partial .loc indexing drops the selected levels but preserves the
    names of the remaining ones."""
    result = self.ymd.loc[2000]
    result2 = self.ymd['A'].loc[2000]
    assert result.index.names == self.ymd.index.names[1:]
    assert result2.index.names == self.ymd.index.names[1:]

    result = self.ymd.loc[2000, 2]
    result2 = self.ymd['A'].loc[2000, 2]
    assert result.index.name == self.ymd.index.names[2]
    assert result2.index.name == self.ymd.index.names[2]
def test_partial_set(self):
    """Setting values via a partial MultiIndex key writes to every row
    under that key (GH #397)."""
    df = self.ymd.copy()
    exp = self.ymd.copy()
    df.loc[2000, 4] = 0
    exp.loc[2000, 4].values[:] = 0
    tm.assert_frame_equal(df, exp)

    df['A'].loc[2000, 4] = 1
    exp['A'].loc[2000, 4].values[:] = 1
    tm.assert_frame_equal(df, exp)

    df.loc[2000] = 5
    exp.loc[2000].values[:] = 5
    tm.assert_frame_equal(df, exp)

    # this works...for now
    df['A'].iloc[14] = 5
    assert df['A'][14] == 5
def test_unstack_preserve_types(self):
    """unstack keeps float and object dtypes; int columns become float
    because unstacking introduces NaNs (GH #403)."""
    self.ymd['E'] = 'foo'
    self.ymd['F'] = 2

    unstacked = self.ymd.unstack('month')
    assert unstacked['A', 1].dtype == np.float64
    assert unstacked['E', 1].dtype == np.object_
    # int column is upcast to float to hold missing entries
    assert unstacked['F', 1].dtype == np.float64
def test_unstack_group_index_overflow(self):
    """unstack on an index whose combined group key exceeds 64 bits
    (8 levels of 500 values) must not overflow, regardless of where the
    unstacked level sits."""
    labels = np.tile(np.arange(500), 2)
    level = np.arange(500)

    # unstacked level at the end
    index = MultiIndex(levels=[level] * 8 + [[0, 1]],
                       labels=[labels] * 8 + [np.arange(2).repeat(500)])

    s = Series(np.arange(1000), index=index)
    result = s.unstack()
    assert result.shape == (500, 2)

    # test roundtrip
    stacked = result.stack()
    tm.assert_series_equal(s, stacked.reindex(s.index))

    # put it at beginning
    index = MultiIndex(levels=[[0, 1]] + [level] * 8,
                       labels=[np.arange(2).repeat(500)] + [labels] * 8)

    s = Series(np.arange(1000), index=index)
    result = s.unstack(0)
    assert result.shape == (500, 2)

    # put it in middle
    index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
                       labels=([labels] * 4 + [np.arange(2).repeat(500)] +
                               [labels] * 4))

    s = Series(np.arange(1000), index=index)
    result = s.unstack(4)
    assert result.shape == (500, 2)
def test_pyint_engine(self):
    """get_loc/get_indexer stay correct when the combined key width
    exceeds 64 bits and the engine falls back to Python ints (GH 18519)."""
    # GH 18519 : when combinations of codes cannot be represented in 64
    # bits, the index underlying the MultiIndex engine works with Python
    # integers, rather than uint64.
    N = 5
    keys = [tuple(l) for l in [[0] * 10 * N,
                               [1] * 10 * N,
                               [2] * 10 * N,
                               [np.nan] * N + [2] * 9 * N,
                               [0] * N + [2] * 9 * N,
                               [np.nan] * N + [2] * 8 * N + [0] * N]]
    # Each level contains 4 elements (including NaN), so it is represented
    # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a
    # 64 bit engine and truncating the first levels, the fourth and fifth
    # keys would collide; if truncating the last levels, the fifth and
    # sixth; if rotating bits rather than shifting, the third and fifth.

    for idx in range(len(keys)):
        index = MultiIndex.from_tuples(keys)
        assert index.get_loc(keys[idx]) == idx

        expected = np.arange(idx + 1, dtype=np.intp)
        result = index.get_indexer([keys[i] for i in expected])
        tm.assert_numpy_array_equal(result, expected)

    # With missing key:
    idces = range(len(keys))
    expected = np.array([-1] + list(idces), dtype=np.intp)
    missing = tuple([0, 1] * 5 * N)
    result = index.get_indexer([missing] + [keys[i] for i in idces])
    tm.assert_numpy_array_equal(result, expected)
def test_getitem_lowerdim_corner(self):
    """Reading a missing tuple key raises KeyError; assigning to it
    inserts the row, findable after sorting."""
    pytest.raises(KeyError, self.frame.loc.__getitem__,
                  (('bar', 'three'), 'B'))

    # in theory should be inserting in a sorted space????
    self.frame.loc[('bar', 'three'), 'B'] = 0
    assert self.frame.sort_index().loc[('bar', 'three'), 'B'] == 0
# ---------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
    """Skipped: behavior of partial .loc with missing keys is still
    undecided (see inline notes)."""
    pytest.skip("skipping for now")

    result = self.ymd.loc[2000, 0]
    expected = self.ymd.loc[2000]['A']
    tm.assert_series_equal(result, expected)

    # need to put in some work here
    # self.ymd.loc[2000, 0] = 0
    # assert (self.ymd.loc[2000]['A'] == 0).all()

    # Pretty sure the second (and maybe even the first) is already wrong.
    pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6))
    pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6), 0)
# ---------------------------------------------------------------------
def test_to_html(self):
    """Smoke test: to_html with a named column level, both orientations."""
    self.ymd.columns.name = 'foo'
    self.ymd.to_html()
    self.ymd.T.to_html()
def test_level_with_tuples(self):
    """Indexing with tuple-valued level labels: a tuple key selects a
    level value, not a multi-level path, for 3- and 2-element tuples."""
    index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0), (
        'foo', 'qux', 0)], [0, 1]],
        labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])

    series = Series(np.random.randn(6), index=index)
    frame = DataFrame(np.random.randn(6, 4), index=index)

    result = series[('foo', 'bar', 0)]
    result2 = series.loc[('foo', 'bar', 0)]
    expected = series[:2]
    expected.index = expected.index.droplevel(0)
    tm.assert_series_equal(result, expected)
    tm.assert_series_equal(result2, expected)

    # appending an extra scalar to a full tuple key is invalid
    pytest.raises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))

    result = frame.loc[('foo', 'bar', 0)]
    result2 = frame.xs(('foo', 'bar', 0))
    expected = frame[:2]
    expected.index = expected.index.droplevel(0)
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result2, expected)

    # same exercise with 2-tuples as level values
    index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'), (
        'foo', 'qux')], [0, 1]],
        labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])

    series = Series(np.random.randn(6), index=index)
    frame = DataFrame(np.random.randn(6, 4), index=index)

    result = series[('foo', 'bar')]
    result2 = series.loc[('foo', 'bar')]
    expected = series[:2]
    expected.index = expected.index.droplevel(0)
    tm.assert_series_equal(result, expected)
    tm.assert_series_equal(result2, expected)

    result = frame.loc[('foo', 'bar')]
    result2 = frame.xs(('foo', 'bar'))
    expected = frame[:2]
    expected.index = expected.index.droplevel(0)
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
    """Positional slicing of a MultiIndexed series/frame reads and
    writes the expected rows."""
    s = self.ymd['A']
    result = s[5:]
    expected = s.reindex(s.index[5:])
    tm.assert_series_equal(result, expected)

    # slice assignment writes through to the underlying values
    exp = self.ymd['A'].copy()
    s[5:] = 0
    exp.values[5:] = 0
    tm.assert_numpy_array_equal(s.values, exp.values)

    result = self.ymd[5:]
    expected = self.ymd.reindex(s.index[5:])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('unicode_strings', [True, False])
def test_mixed_depth_get(self, unicode_strings):
    """Selecting by a partial key on mixed-depth columns returns the
    padded column, renamed to the shorter key."""
    # If unicode_strings is True, the column labels in dataframe
    # construction will use unicode strings in Python 2 (pull request
    # #17099).

    arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
              ['', 'OD', 'OD', 'result1', 'result2', 'result1'],
              ['', 'wx', 'wy', '', '', '']]

    if unicode_strings:
        arrays = [[u(s) for s in arr] for arr in arrays]

    tuples = sorted(zip(*arrays))
    index = MultiIndex.from_tuples(tuples)
    df = DataFrame(np.random.randn(4, 6), columns=index)

    # one-element key: matches the ('a', '', '') column
    result = df['a']
    expected = df['a', '', ''].rename('a')
    tm.assert_series_equal(result, expected)

    # two-element key: matches the padded 3-tuple
    result = df['routine1', 'result1']
    expected = df['routine1', 'result1', '']
    expected = expected.rename(('routine1', 'result1'))
    tm.assert_series_equal(result, expected)
def test_mixed_depth_insert(self):
    """Assigning with a short key on mixed-depth columns pads the key
    with empty strings."""
    arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
              ['', 'OD', 'OD', 'result1', 'result2', 'result1'],
              ['', 'wx', 'wy', '', '', '']]

    tuples = sorted(zip(*arrays))
    index = MultiIndex.from_tuples(tuples)
    df = DataFrame(randn(4, 6), columns=index)

    result = df.copy()
    expected = df.copy()
    result['b'] = [1, 2, 3, 4]
    expected['b', '', ''] = [1, 2, 3, 4]
    tm.assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
tm.assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
tm.assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.loc[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
tm.assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
    """pop with a short key behaves like pop with the padded tuple, and
    popping a top-level label removes the whole sub-frame."""
    arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
              ['', 'OD', 'OD', 'result1', 'result2', 'result1'],
              ['', 'wx', 'wy', '', '', '']]

    tuples = sorted(zip(*arrays))
    index = MultiIndex.from_tuples(tuples)
    df = DataFrame(randn(4, 6), columns=index)

    df1 = df.copy()
    df2 = df.copy()
    result = df1.pop('a')
    expected = df2.pop(('a', '', ''))
    tm.assert_series_equal(expected, result, check_names=False)
    tm.assert_frame_equal(df1, df2)
    assert result.name == 'a'

    expected = df1['top']
    df1 = df1.drop(['top'], axis=1)
    result = df2.pop('top')
    tm.assert_frame_equal(expected, result)
    tm.assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
    """reindex with level=0 selects all rows under the given outer
    labels; .loc with a label list is equivalent."""
    result = self.frame.reindex(['foo', 'qux'], level=0)
    expected = self.frame.iloc[[0, 1, 2, 7, 8, 9]]
    tm.assert_frame_equal(result, expected)

    result = self.frame.T.reindex(['foo', 'qux'], axis=1, level=0)
    tm.assert_frame_equal(result, expected.T)

    result = self.frame.loc[['foo', 'qux']]
    tm.assert_frame_equal(result, expected)

    result = self.frame['A'].loc[['foo', 'qux']]
    tm.assert_series_equal(result, expected['A'])

    result = self.frame.T.loc[:, ['foo', 'qux']]
    tm.assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
    """Partial-key assignment with a label list or label slice writes
    every row under each outer label, for frames and series."""
    expected = self.frame.copy()
    result = self.frame.copy()
    result.loc[['foo', 'bar']] = 0
    expected.loc['foo'] = 0
    expected.loc['bar'] = 0
    tm.assert_frame_equal(result, expected)

    expected = self.frame.copy()
    result = self.frame.copy()
    result.loc['foo':'bar'] = 0
    expected.loc['foo'] = 0
    expected.loc['bar'] = 0
    tm.assert_frame_equal(result, expected)

    expected = self.frame['A'].copy()
    result = self.frame['A'].copy()
    result.loc[['foo', 'bar']] = 0
    expected.loc['foo'] = 0
    expected.loc['bar'] = 0
    tm.assert_series_equal(result, expected)

    expected = self.frame['A'].copy()
    result = self.frame['A'].copy()
    result.loc['foo':'bar'] = 0
    expected.loc['foo'] = 0
    expected.loc['bar'] = 0
    tm.assert_series_equal(result, expected)
def test_drop_level(self):
    """drop with level= removes all rows (or columns, axis=1) matching
    the labels at that level."""
    result = self.frame.drop(['bar', 'qux'], level='first')
    expected = self.frame.iloc[[0, 1, 2, 5, 6]]
    tm.assert_frame_equal(result, expected)

    result = self.frame.drop(['two'], level='second')
    expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]]
    tm.assert_frame_equal(result, expected)

    result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
    expected = self.frame.iloc[[0, 1, 2, 5, 6]].T
    tm.assert_frame_equal(result, expected)

    result = self.frame.T.drop(['two'], axis=1, level='second')
    expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]].T
    tm.assert_frame_equal(result, expected)
def test_drop_level_nonunique_datetime(self):
    """drop by Timestamp at a named level works on a non-unique
    MultiIndex (GH 12701)."""
    idx = Index([2, 3, 4, 4, 5], name='id')
    idxdt = pd.to_datetime(['201603231400',
                            '201603231500',
                            '201603231600',
                            '201603231600',
                            '201603231700'])
    df = DataFrame(np.arange(10).reshape(5, 2),
                   columns=list('ab'), index=idx)
    df['tstamp'] = idxdt
    df = df.set_index('tstamp', append=True)
    ts = Timestamp('201603231600')
    assert not df.index.is_unique

    # both duplicated-timestamp rows (id == 4) must be dropped
    result = df.drop(ts, level='tstamp')
    expected = df.loc[idx != 4]
    tm.assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
    """Dropping a row by tuple key keeps the index level names."""
    index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
                                    [1, 2, 3, 1, 2, 3]],
                                   names=['one', 'two'])

    df = DataFrame(np.random.randn(6, 3), index=index)

    result = df.drop([(0, 2)])
    assert result.index.names == ('one', 'two')
def test_unicode_repr_issues(self):
    """repr of MultiIndex levels containing non-ASCII labels must not
    raise."""
    levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
              Index([0, 1])]
    labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
    index = MultiIndex(levels=levels, labels=labels)

    repr(index.levels)

    # NumPy bug
    # repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
    """repr with a non-ASCII level name must not raise, for series and
    frames."""
    index = MultiIndex.from_tuples([(0, 0), (1, 1)],
                                   names=[u('\u0394'), 'i1'])

    s = Series(lrange(2), index=index)
    df = DataFrame(np.random.randn(2, 4), index=index)
    repr(s)
    repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples([('1a', '2a'), ('1a', '2b'), ('1a', '2c')
])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
assert df['new'].isna().all()
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
    """Scalar assignment through .loc with a row subset, and through a
    column view, writes back to the frame."""
    subset = self.frame.index[[1, 4, 5]]

    self.frame.loc[subset] = 99
    assert (self.frame.loc[subset].values == 99).all()

    # writing through a column reference propagates to the parent
    col = self.frame['B']
    col[subset] = 97
    assert (self.frame.loc[subset, 'B'] == 97).all()
def test_frame_dict_constructor_empty_series(self):
    """Constructing a DataFrame from a dict mixing MultiIndexed series
    with an empty Series must not raise."""
    s1 = Series([
        1, 2, 3, 4
    ], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]))
    s2 = Series([
        1, 2, 3, 4
    ], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
    s3 = Series()

    # it works!
    DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
    DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
    """iloc column selection on hierarchical columns returns a Series
    equal to the .loc tuple selection (GH 1678)."""
    columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'), (
        'Colorado', 'Green')])
    index = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)
                                    ])

    frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
                      columns=columns)

    result = frame.iloc[:, 1]
    exp = frame.loc[:, ('Ohio', 'Red')]
    assert isinstance(result, Series)
    tm.assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
    """.loc assignment via a MultiIndex key writes to every duplicate
    row under that key (GH 1750)."""
    df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
                   columns=list("ABCD"))

    df = df.set_index(['A', 'B'])
    ix = MultiIndex.from_tuples([(1, 1)])

    df.loc[ix, "C"] = '_'

    assert (df.xs((1, 1))['C'] == '_').all()
def test_indexing_over_hashtable_size_cutoff(self):
    """Tuple lookup still works when the index is below the raised
    hashtable size cutoff (exercises the non-hashtable path)."""
    n = 10000

    # raise the cutoff so this index uses the alternate engine;
    # restored at the end to avoid leaking state into other tests
    old_cutoff = _index._SIZE_CUTOFF
    _index._SIZE_CUTOFF = 20000

    s = Series(np.arange(n),
               MultiIndex.from_arrays((["a"] * n, np.arange(n))))

    # hai it works!
    assert s[("a", 5)] == 5
    assert s[("a", 6)] == 6
    assert s[("a", 7)] == 7

    _index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
    """A -1 label codes a missing entry: both element access and
    .values surface it as NaN."""
    index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
                       labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
                                                           1, 2, 3]])

    assert isna(index[4][0])
    assert isna(index.values[4][0])
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
assert len(result) == 3
def test_duplicate_mi(self):
    """.loc with a tuple key on a sorted non-unique MultiIndex returns
    all matching rows (GH 4516)."""
    df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
                    ['bah', 'bam', 3.0, 3],
                    ['bah', 'bam', 4.0, 4], ['foo', 'bar', 5.0, 5],
                    ['bah', 'bam', 6.0, 6]],
                   columns=list('ABCD'))
    df = df.set_index(['A', 'B'])
    df = df.sort_index(level=0)
    expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
                          ['foo', 'bar', 5.0, 5]],
                         columns=list('ABCD')).set_index(['A', 'B'])
    result = df.loc[('foo', 'bar')]
    tm.assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
    """duplicated()/drop_duplicates() on a MultiIndex for keep='first'
    (default), keep='last', and keep=False (GH 4060)."""
    idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))

    # default: first occurrence kept, later duplicates flagged
    expected = np.array(
        [False, False, False, True, False, False], dtype=bool)
    duplicated = idx.duplicated()
    tm.assert_numpy_array_equal(duplicated, expected)
    assert duplicated.dtype == bool
    expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
    tm.assert_index_equal(idx.drop_duplicates(), expected)

    # keep='last': last occurrence kept, earlier duplicates flagged
    expected = np.array([True, False, False, False, False, False])
    duplicated = idx.duplicated(keep='last')
    tm.assert_numpy_array_equal(duplicated, expected)
    assert duplicated.dtype == bool
    expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
    tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)

    # keep=False: every occurrence of a duplicated key is flagged
    expected = np.array([True, False, False, True, False, False])
    duplicated = idx.duplicated(keep=False)
    tm.assert_numpy_array_equal(duplicated, expected)
    assert duplicated.dtype == bool
    expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
    tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(
['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'
] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M',
tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00',
'2013-04-03 9:00'], tz='Asia/Tokyo')
tm.assert_index_equal(idx.levels[0], expected1)
tm.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product(
[date1, date2, date3], [date1, date2, date3]):
index = MultiIndex.from_product([[d1], [d2]])
assert isinstance(index.levels[0], pd.DatetimeIndex)
assert isinstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = DataFrame(
{'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], name='datetime')
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
tm.assert_index_equal(df.index.levels[0], expected)
tm.assert_index_equal(df.index.levels[1],
Index(['a', 'b'], name='label'))
df = df.swaplevel(0, 1)
tm.assert_index_equal(df.index.levels[0],
Index(['a', 'b'], name='label'))
tm.assert_index_equal(df.index.levels[1], expected)
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-01 09:00', '2012-04-02 09:00',
'2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
# GH 7092
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz,
name='idx1')
idx2 = Index(range(5), name='idx2', dtype='int64')
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS',
tz='Europe/Paris', name='idx3')
idx = MultiIndex.from_arrays([idx1, idx2, idx3])
df = DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(
lambda d: Timestamp(d, tz='Europe/Paris'))
tm.assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = MultiIndex.from_product([['a', 'b'], pd.date_range(
'20130101', periods=3, tz=tz)])
df = DataFrame(
np.arange(6, dtype='int64').reshape(
6, 1), columns=['a'], index=idx)
expected = DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(
lambda d: Timestamp(d, freq='D', tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = MultiIndex.from_product(
[pd.period_range('20130101', periods=3, freq='M'), list('abc')],
names=['month', 'feature'])
df = DataFrame(np.arange(9, dtype='int64').reshape(-1, 1),
index=idx, columns=['a'])
expected = DataFrame({
'month': ([pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3),
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')
}, columns=['month', 'feature', 'a'])
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_multiindex_columns(self):
levels = [['A', ''], ['B', 'b']]
df = DataFrame([[0, 2], [1, 3]],
columns=MultiIndex.from_tuples(levels))
result = df[['B']].rename_axis('A').reset_index()
tm.assert_frame_equal(result, df)
# gh-16120: already existing column
with tm.assert_raises_regex(ValueError,
(r"cannot insert \('A', ''\), "
"already exists")):
df.rename_axis('A').reset_index()
# gh-16164: multiindex (tuple) full key
result = df.set_index([('A', '')]).reset_index()
tm.assert_frame_equal(result, df)
# with additional (unnamed) index level
idx_col = DataFrame([[0], [1]],
columns=MultiIndex.from_tuples([('level_0', '')]))
expected = pd.concat([idx_col, df[[('B', 'b'), ('A', '')]]], axis=1)
result = df.set_index([('B', 'b')], append=True).reset_index()
tm.assert_frame_equal(result, expected)
# with index name which is a too long tuple...
with tm.assert_raises_regex(ValueError,
("Item must have length equal to number "
"of levels.")):
df.rename_axis([('C', 'c', 'i')]).reset_index()
# or too short...
levels = [['A', 'a', ''], ['B', 'b', 'i']]
df2 = DataFrame([[0, 2], [1, 3]],
columns=MultiIndex.from_tuples(levels))
idx_col = DataFrame([[0], [1]],
columns=MultiIndex.from_tuples([('C', 'c', 'ii')]))
expected = pd.concat([idx_col, df2], axis=1)
result = df2.rename_axis([('C', 'c')]).reset_index(col_fill='ii')
tm.assert_frame_equal(result, expected)
# ... which is incompatible with col_fill=None
with tm.assert_raises_regex(ValueError,
("col_fill=None is incompatible with "
r"incomplete column name \('C', 'c'\)")):
df2.rename_axis([('C', 'c')]).reset_index(col_fill=None)
# with col_level != 0
result = df2.rename_axis([('c', 'ii')]).reset_index(col_level=1,
col_fill='C')
tm.assert_frame_equal(result, expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='A')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data), )
def test_iloc_mi(self):
# GH 13797
# Test if iloc can handle integer locations in MultiIndexed DataFrame
data = [['str00', 'str01'], ['str10', 'str11'], ['str20', 'srt21'],
['str30', 'str31'], ['str40', 'str41']]
mi = MultiIndex.from_tuples(
[('CC', 'A'), ('CC', 'B'), ('CC', 'B'), ('BB', 'a'), ('BB', 'b')])
expected = DataFrame(data)
df_mi = DataFrame(data, index=mi)
result = DataFrame([[df_mi.iloc[r, c] for c in range(2)]
for r in range(5)])
tm.assert_frame_equal(result, expected)
class TestSorted(Base):
""" everything you wanted to test about sorting """
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
assert result.index.names == self.frame.index.names
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3, 4)
for gen, extra in [([1., 3., 2., 5.], 4.), ([1, 3, 2, 5], 4),
([Timestamp('20130101'), Timestamp('20130103'),
Timestamp('20130102'), Timestamp('20130105')],
Timestamp('20130104')),
(['1one', '3one', '2one', '5one'], '4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,
DataFrame('world', index=list('def'),
columns=MultiIndex.from_tuples(
[('red', extra)]))], axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of read
assert str(df2).splitlines()[0].split() == ['red']
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red', extra)] = 'world'
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sort_index(level=0)
# preserve names
assert a_sorted.index.names == self.frame.index.names
# inplace
rs = self.frame.copy()
rs.sort_index(level=0, inplace=True)
tm.assert_frame_equal(rs, self.frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sort_index(level='second')
expected = self.frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
sorted_before = self.frame.sort_index(level=1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before,
sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
assert index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
assert not index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]])
assert not index.is_lexsorted()
assert index.lexsort_depth == 0
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.loc[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
tm.assert_series_equal(rs, xp)
def test_getitem_slice_not_sorted(self):
df = self.frame.sort_index(level=1).T
# buglet with int typechecking
result = df.iloc[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted2(self):
# 13431
df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
'col2': [3, 1, 1, 2],
'data': ['one', 'two', 'three', 'four']})
df2 = df.set_index(['col1', 'col2'])
df2_original = df2.copy()
df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
df2.index.set_labels([0, 1, 0, 2], level='col1', inplace=True)
assert not df2.index.is_lexsorted()
assert not df2.index.is_monotonic
assert df2_original.index.equals(df2.index)
expected = df2.sort_index()
assert expected.index.is_lexsorted()
assert expected.index.is_monotonic
result = df2.sort_index(level=0)
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns.values)]
result = df['foo']
result2 = df.loc[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.loc['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
result = s['qux']
result2 = s.loc['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
def test_sort_index_and_reconstruction(self):
# 15622
# lexsortedness should be identical
# across MultiIndex consruction methods
df = DataFrame([[1, 1], [2, 2]], index=list('ab'))
expected = DataFrame([[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples([(0.5, 'a'),
(0.5, 'b'),
(0.8, 'a'),
(0.8, 'b')]))
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list('ab')]))
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(levels=[[0.5, 0.8], ['a', 'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# 14015
df = DataFrame([[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, '20160811 12:00:00'),
(0, '20160809 12:00:00')],
names=['l1', 'Date']))
df.columns.set_levels(pd.to_datetime(df.columns.levels[1]),
level=1,
inplace=True)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame({'value': [1, 2, 3, 4]},
index=MultiIndex(
levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame({'value': [2, 1, 4, 3]},
index=MultiIndex(
levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_reorder_on_ops(self):
# 15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[['a', 'b'], ['big', 'small'], ['red', 'blu']],
names=['letter', 'size', 'color']),
columns=['near', 'far'])
df = df.sort_index()
def my_func(group):
group.index = ['newz', 'newa']
return group
result = df.groupby(level=['letter', 'size']).apply(
my_func).sort_index()
expected = MultiIndex.from_product(
[['a', 'b'], ['big', 'small'], ['newa', 'newz']],
names=['letter', 'size', None])
tm.assert_index_equal(result.index, expected)
def test_sort_non_lexsorted(self):
# degenerate case where we sort but don't
# have a satisfying result :<
# GH 15797
idx = MultiIndex([['A', 'B', 'C'],
['c', 'b', 'a']],
[[0, 1, 2, 0, 1, 2],
[0, 2, 1, 1, 0, 2]])
df = DataFrame({'col': range(len(idx))},
index=idx,
dtype='int64')
assert df.index.is_lexsorted() is False
assert df.index.is_monotonic is False
sorted = df.sort_index()
assert sorted.index.is_lexsorted() is True
assert sorted.index.is_monotonic is True
expected = DataFrame(
{'col': [1, 4, 5, 2]},
index=MultiIndex.from_tuples([('B', 'a'), ('B', 'c'),
('C', 'a'), ('C', 'b')]),
dtype='int64')
result = sorted.loc[pd.IndexSlice['B':'C', 'a':'c'], :]
tm.assert_frame_equal(result, expected)
def test_sort_index_nan(self):
# GH 14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4),
index=mi, columns=list('ABCD'))
s = Series(np.arange(4), index=mi)
df2 = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, np.nan, 280, 259, np.nan, 623, 90, 312,
np.nan, 301, 359, 801],
'cost': [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12]
}).set_index(['date', 'user_id'])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position='last')
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position='first')
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position='last')
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position='first')
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_ascending_list(self):
# GH: 16934
# Set up a Series with a three level MultiIndex
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'],
[4, 3, 2, 1, 4, 3, 2, 1]]
tuples = lzip(*arrays)
mi = MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
s = Series(range(8), index=mi)
# Sort with boolean ascending
result = s.sort_index(level=['third', 'first'], ascending=False)
expected = s.iloc[[4, 0, 5, 1, 6, 2, 7, 3]]
tm.assert_series_equal(result, expected)
# Sort with list of boolean ascending
result = s.sort_index(level=['third', 'first'],
ascending=[False, True])
expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
SSJohns/osf.io | scripts/analytics/addons.py | 18 | 2173 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'box',
'dataverse',
'dropbox',
'figshare',
'github',
'googledrive',
'mendeley',
's3',
'zotero',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
for record in collection.find({}, {_id: True})
]
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/tests/test_preprocessing.py | 2 | 18316 | import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises, assert_true, assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import Scaler
from sklearn.preprocessing import scale
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = Scaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
scaler = Scaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = Scaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_scaled_back, X)
def test_scaler_without_copy():
"""Check that Scaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_copy = X.copy()
Scaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
Scaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sp.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, Scaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = Scaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sp.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
    """Check Normalizer(norm='l2') on dense and sparse inputs.

    Verifies the copy=True / copy=False contracts, that every non-zero
    row of the output has unit Euclidean norm, and that an all-zero row
    stays zero instead of producing NaNs.
    """
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sp.csr_matrix(X_dense)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
    # build the pruned variant using the regular constructor
    X_sparse_pruned = sp.csr_matrix(X_dense)
    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l2', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)
        normalizer = Normalizer(norm='l2', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)
        for X_norm in (X_norm1, X_norm2):
            # ``range`` instead of Python-2-only ``xrange`` (NameError on Py3)
            for i in range(3):
                assert_almost_equal(la.norm(X_norm[i]), 1.0)
            assert_almost_equal(la.norm(X_norm[3]), 0.0)
    # check input for which copy=False won't prevent a copy
    for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
        X = init(X_dense)
        # Fixed: dropped the accidental double binding
        # ``X_norm = normalizer = Normalizer(...)`` kept from a copy-paste.
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
        # copy=False cannot be honoured: the input is converted to CSR first
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sp.csr_matrix))
        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
    """Check that invalid arguments yield ValueError"""
    for bad_kwargs in ({'axis': 2}, {'norm': 'l3'}):
        assert_raises(ValueError, normalize, [[0]], **bad_kwargs)
def test_binarizer():
    """Binarizer thresholds dense and CSR inputs and honours the copy flag.

    With threshold=2.0 only the two entries > 2 map to 1; with the default
    threshold (0) the four non-zero entries map to 1.
    """
    X_ = np.array([[1, 0, 5], [2, 3, 0]])
    for init in (np.array, sp.csr_matrix):
        X = init(X_.copy())
        # explicit threshold: strictly greater than 2.0 -> 1
        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        # default threshold via fit(): non-zero -> 1
        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)
        # copy=True must return a new object
        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)
        # copy=False may binarize in place and return the same object
        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
    """LabelBinarizer round-trips binary and multi-class string labels."""
    lb = LabelBinarizer()
    # two-class case: a single output column is enough
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    # multi-class case: one-hot rows, columns ordered by sorted class label
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_set_label_encoding():
    """Custom neg_label / pos_label values replace the default 0 / 1."""
    lb = LabelBinarizer(neg_label=-2, pos_label=2)
    # two-class case
    inp = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 2, 2, -2]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    # multi-class case: one +2 per row, -2 everywhere else
    inp = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
    """LabelBinarizer handles multilabel input (sequences of labels)."""
    lb = LabelBinarizer()
    # test input as lists of tuples
    inp = [(2, 3), (1,), (1, 2)]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    got = lb.fit_transform(inp)
    assert_array_equal(indicator_mat, got)
    assert_equal(lb.inverse_transform(got), inp)
    # test input as label indicator matrix: already binarized input must
    # round-trip unchanged
    lb.fit(indicator_mat)
    assert_array_equal(indicator_mat,
                       lb.inverse_transform(indicator_mat))
    # regression test for the two-class multilabel case
    lb = LabelBinarizer()
    inp = [[1, 0], [0], [1], [0, 1]]
    expected = np.array([[1, 1],
                         [1, 0],
                         [0, 1],
                         [1, 1]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    # compare as sets: the label order inside each sample is not guaranteed
    assert_equal([set(x) for x in lb.inverse_transform(got)],
                 [set(x) for x in inp])
def test_label_binarizer_errors():
    """Check that invalid arguments yield ValueError"""
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)
    # multilabel input is incompatible with a binarizer fitted on
    # single-label data
    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)
    # transforming before/without fit, or with empty input, must fail
    lb = LabelBinarizer()
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])
    # constructor rejects neg_label >= pos_label
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
def test_label_encoder():
    """Test LabelEncoder's transform and inverse_transform methods"""
    le = LabelEncoder()
    le.fit([1, 1, 4, 5, -1, 0])
    # classes_ holds the sorted unique labels; codes are their indices
    assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    # unseen label (6) must be rejected
    assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
    """Test fit_transform"""
    # each pair is (raw labels, expected integer codes); codes index the
    # sorted unique labels
    cases = (
        ([1, 1, 4, 5, -1, 0], [2, 2, 3, 4, 0, 1]),
        (["paris", "paris", "tokyo", "amsterdam"], [1, 1, 2, 0]),
    )
    for raw_labels, expected_codes in cases:
        encoder = LabelEncoder()
        assert_array_equal(encoder.fit_transform(raw_labels), expected_codes)
def test_label_encoder_string_labels():
    """Test LabelEncoder's transform and inverse_transform methods with
    non-numeric labels"""
    le = LabelEncoder()
    le.fit(["paris", "paris", "tokyo", "amsterdam"])
    # classes are sorted lexicographically
    assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
    assert_array_equal(le.transform(["tokyo", "tokyo", "paris"]),
                       [2, 2, 1])
    assert_array_equal(le.inverse_transform([2, 2, 1]),
                       ["tokyo", "tokyo", "paris"])
    # unseen label must be rejected
    assert_raises(ValueError, le.transform, ["london"])
def test_label_encoder_errors():
    """Check that invalid arguments yield ValueError"""
    le = LabelEncoder()
    # both directions must reject use before fit / empty input
    for method in (le.transform, le.inverse_transform):
        assert_raises(ValueError, method, [])
def test_label_binarizer_iris():
    """One-vs-rest built by hand via LabelBinarizer matches SGDClassifier.

    Trains one binary SGDClassifier per binarized column, decodes the
    stacked decision values with inverse_transform, and checks the
    accuracy equals that of a single multi-class SGDClassifier.
    """
    lb = LabelBinarizer()
    Y = lb.fit_transform(iris.target)
    # one binary classifier per class column
    clfs = [SGDClassifier().fit(iris.data, Y[:, k])
            for k in range(len(lb.classes_))]
    Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
    # inverse_transform picks the class with the highest score per row
    y_pred = lb.inverse_transform(Y_pred)
    accuracy = np.mean(iris.target == y_pred)
    y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
    accuracy2 = np.mean(iris.target == y_pred2)
    assert_almost_equal(accuracy, accuracy2)
def test_label_binarizer_multilabel_unlabeled():
    """Check that LabelBinarizer can handle an unlabeled sample"""
    lb = LabelBinarizer()
    # third sample has no labels -> all-zero indicator row
    y = [[1, 2], [1], []]
    Y = np.array([[1, 1],
                  [1, 0],
                  [0, 0]])
    # NOTE(review): assert_equal is used on arrays here — presumably this is
    # numpy.testing.assert_equal (which compares elementwise); verify the
    # module's imports.
    assert_equal(lb.fit_transform(y), Y)
def test_center_kernel():
    """Test that KernelCenterer is equivalent to Scaler in feature space"""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    # mean-center the features explicitly (no scaling)
    scaler = Scaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    # linear kernel on the raw features
    K_fit = np.dot(X_fit, X_fit.T)
    # center fit time matrix: centering the kernel must equal the kernel of
    # the centered features
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)
    # center predict time matrix: new samples are centered with the
    # statistics learned at fit time
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
    """fit(X).transform(X) and fit_transform(X) must give the same output."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for transformer in (Scaler(), Normalizer(), Binarizer()):
        via_fit_then_transform = transformer.fit(X).transform(X)
        via_fit_transform = transformer.fit_transform(X)
        assert_array_equal(via_fit_then_transform, via_fit_transform)
| agpl-3.0 |
wegamekinglc/Finance-Python | PyFin/tests/Math/Accumulators/testAccumulatorsArithmetic.py | 2 | 31296 | # -*- coding: utf-8 -*-
u"""
Created on 2015-7-27
@author: cheng.li
"""
import unittest
import copy
import tempfile
import pickle
import os
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from PyFin.Math.Accumulators.IAccumulators import Identity
from PyFin.Math.Accumulators.IAccumulators import Exp
from PyFin.Math.Accumulators.IAccumulators import Log
from PyFin.Math.Accumulators.IAccumulators import Sqrt
from PyFin.Math.Accumulators.IAccumulators import Sign
from PyFin.Math.Accumulators.IAccumulators import Abs
from PyFin.Math.Accumulators.IAccumulators import Pow
from PyFin.Math.Accumulators.IAccumulators import Acos
from PyFin.Math.Accumulators.IAccumulators import Acosh
from PyFin.Math.Accumulators.IAccumulators import Asin
from PyFin.Math.Accumulators.IAccumulators import Asinh
from PyFin.Math.Accumulators.IAccumulators import NormInv
from PyFin.Math.Accumulators.IAccumulators import IIF
from PyFin.Math.Accumulators.IAccumulators import Latest
from PyFin.Math.Accumulators.IAccumulators import Ceil
from PyFin.Math.Accumulators.IAccumulators import Floor
from PyFin.Math.Accumulators.IAccumulators import Round
from PyFin.Math.Accumulators.StatefulAccumulators import MovingAverage
from PyFin.Math.Accumulators.StatefulAccumulators import MovingVariance
from PyFin.Math.Accumulators.StatefulAccumulators import MovingMax
from PyFin.Math.Accumulators.StatefulAccumulators import MovingCorrelation
from PyFin.Math.Accumulators.StatelessAccumulators import Sum
from PyFin.Math.Accumulators.StatelessAccumulators import Average
from PyFin.Math.Accumulators.StatelessAccumulators import Min
from PyFin.Math.Accumulators.StatelessAccumulators import Max
class TestAccumulatorsArithmetic(unittest.TestCase):
    """Tests arithmetic composition of PyFin accumulators.

    Covers: binary/unary operators on accumulators (also with plain
    floats on either side), the ``>>`` composition operator, comparison
    operators, elementwise math wrappers (Exp, Log, ..., Round),
    deepcopy and pickle round-trips, DataFrame transform, IIF, and the
    LaTeX-style ``str`` representations.
    """
    def setUp(self):
        # fixed seed -> deterministic sample series for all tests
        np.random.seed(0)
        self.sampleOpen = np.random.randn(10000)
        self.sampleClose = np.random.randn(10000)
        self.sampleRf = np.random.randn(10000)
    def testAddedNanValue(self):
        # NaN pushes must be ignored by the accumulator
        m = Max('x')
        m.push({'x': 10.0})
        m.push({'x': np.nan})
        self.assertAlmostEqual(10., m.value)
    def testAccumulatorBasic(self):
        # .value property and .result() must agree
        m = Max('x')
        m.push({'x': 10.0})
        self.assertAlmostEqual(m.result(), m.value)
    # --- binary arithmetic operators; each compound accumulator is compared
    # --- against manually combined results of its operands at every step
    def testPlusOperator(self):
        ma5 = MovingAverage(5, 'close')
        ma20 = MovingAverage(20, 'open')
        plusRes = ma5 + ma20
        for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
            data = {'close': close, 'open': open}
            ma5.push(data)
            ma20.push(data)
            plusRes.push(data)
            expected = ma5.result() + ma20.result()
            calculated = plusRes.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testRPlusOperator(self):
        ma5 = MovingAverage(5, 'close')
        ma20 = MovingAverage(20, 'close')
        plusRes = 5.0 + MovingAverage(20, 'close')
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            ma20.push(data)
            plusRes.push(data)
            expected = 5.0 + ma20.result()
            calculated = plusRes.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testSubOperator(self):
        ma5 = MovingAverage(5, 'close')
        sumTotal = Sum('open')
        subRes = MovingAverage(5, 'close') - Sum('open')
        for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
            data = {'close': close, 'open': open}
            ma5.push(data)
            sumTotal.push(data)
            subRes.push(data)
            expected = ma5.result() - sumTotal.result()
            calculated = subRes.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testRSubOperator(self):
        ma20 = MovingAverage(20, 'close')
        sumTotal = Sum('close')
        subRes = 5.0 - MovingAverage(20, 'close')
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma20.push(data)
            sumTotal.push(data)
            subRes.push(data)
            expected = 5.0 - ma20.result()
            calculated = subRes.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testMultiplyOperator(self):
        mv5 = MovingVariance(5, 'close')
        average = Average('open')
        mulRes = MovingVariance(5, 'close') * Average('open')
        for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
            data = {'close': close, 'open': open}
            mv5.push(data)
            average.push(data)
            mulRes.push(data)
            # variance needs at least two observations
            if i >= 1:
                expected = mv5.result() * average.result()
                calculated = mulRes.result()
                self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                                 "expected:   {1:f}\n"
                                                                 "calculated: {2:f}".format(i, expected, calculated))
    def testRMultiplyOperator(self):
        ma20 = MovingAverage(20, 'close')
        average = Average('open')
        mulRes = 5.0 * MovingAverage(20, 'close')
        for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
            data = {'close': close, 'open': open}
            average.push(data)
            ma20.push(data)
            mulRes.push(data)
            expected = 5.0 * ma20.result()
            calculated = mulRes.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testDivOperator(self):
        mc5 = MovingCorrelation(5, 'open', 'close')
        minum = Min('open')
        divRes = Min('open') / MovingCorrelation(5, 'open', 'close')
        for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
            data = {'close': close, 'open': open}
            mc5.push(data)
            minum.push(data)
            divRes.push(data)
            # correlation needs at least two observations
            if i >= 1:
                expected = minum.result() / mc5.result()
                calculated = divRes.result()
                self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                                 "expected:   {1:f}\n"
                                                                 "calculated: {2:f}".format(i, expected, calculated))
    def testRDivOperator(self):
        ma20 = MovingAverage(20, 'close')
        divRes = 5.0 / MovingAverage(20, 'close')
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma20.push(data)
            divRes.push(data)
            expected = 5.0 / ma20.result()
            calculated = divRes.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testMultipleOperators(self):
        # nested expression: (ma20 - ma120) / mmax
        ma20 = MovingAverage(20, 'close')
        ma120 = MovingAverage(120, 'close')
        mmax = MovingMax(50, 'open')
        res = (MovingAverage(20, 'close') - MovingAverage(120, 'close')) / MovingMax(50, 'open')
        for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
            data = {'close': close, 'open': open}
            ma20.push(data)
            ma120.push(data)
            mmax.push(data)
            res.push(data)
            expected = (ma20.result() - ma120.result()) / mmax.result()
            calculated = res.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testNegativeOperator(self):
        ma20 = MovingAverage(20, 'close')
        negma20 = -ma20
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma20.push(data)
            negma20.push(data)
            expected = -ma20.result()
            calculated = negma20.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testCompoundedOperator(self):
        # ``a >> b`` feeds a's output into b; equivalent to b(a)
        ma5 = MovingAverage(5, 'x')
        maxer = Max('close')
        max5ma = Max('close') >> MovingAverage(5, 'max')
        max5ma2 = MovingAverage(5, Max('close'))
        for i, close in enumerate(self.sampleClose):
            data = {'close': close, 'open': 1.}
            maxer.push(data)
            data2 = {'x': maxer.result()}
            ma5.push(data2)
            max5ma.push(data)
            max5ma2.push(data)
            expected = ma5.result()
            calculated = max5ma.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
            calculated = max5ma2.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
        # composing with a plain callable is not supported
        with self.assertRaises(ValueError):
            _ = Max('close') >> math.sqrt
    # --- comparison operators return boolean accumulators
    def testLessOrEqualOperators(self):
        m1 = Max('x')
        m2 = Min('x')
        cmp = m1 <= m2
        cmp.push(dict(x=1.0))
        self.assertEqual(True, cmp.result())
        cmp.push(dict(x=2.0))
        self.assertEqual(False, cmp.result())
    def testLessOperator(self):
        m1 = Min('x')
        m2 = Max('x')
        cmp = m1 < m2
        cmp.push(dict(x=1.0))
        self.assertEqual(False, cmp.result())
        cmp.push(dict(x=2.0))
        self.assertEqual(True, cmp.result())
    def testGreaterOrEqualOperator(self):
        m1 = Min('x')
        m2 = Max('x')
        cmp = m1 >= m2
        cmp.push(dict(x=1.0))
        self.assertEqual(True, cmp.result())
        cmp.push(dict(x=2.0))
        self.assertEqual(False, cmp.result())
    def testGreaterOperator(self):
        m1 = Max('x')
        m2 = Min('x')
        cmp = m1 > m2
        cmp.push(dict(x=1.0))
        self.assertEqual(False, cmp.result())
        cmp.push(dict(x=2.0))
        self.assertEqual(True, cmp.result())
    # --- elementwise math wrappers; inputs are pre-transformed so that the
    # --- wrapped function's domain is respected (e.g. cos() before acos)
    def testExpFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Exp(MovingAverage(5, 'close'))
        holder2 = MovingAverage(5, 'close') >> Exp
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            holder2.push(data)
            expected = math.exp(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
            calculated = holder2.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testLogFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Log(ma5)
        sampleClose = np.exp(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.log(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testSignFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Sign(ma5)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = 1 if ma5.result() >= 0 else -1
            calculated = holder.result()
            self.assertEqual(calculated, expected, "at index {0:d}\n"
                                                   "expected:   {1:f}\n"
                                                   "calculated: {2:f}".format(i, expected, calculated))
    def testSqrtFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Sqrt(ma5)
        sampleClose = np.square(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.sqrt(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testAbsFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Abs(ma5)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = abs(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testPowFunction(self):
        ma5min = MovingAverage(5, 'close') >> Min
        holder = Pow(ma5min, 3)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5min.push(data)
            holder.push(data)
            expected = math.pow(ma5min.result(), 3)
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testAcosFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Acos(ma5)
        sampleClose = np.cos(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.acos(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testAcoshFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Acosh(ma5)
        sampleClose = np.cosh(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.acosh(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testAsinFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Asin(ma5)
        sampleClose = np.sin(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.asin(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testAsinhFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Asinh(ma5)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.asinh(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testNormInvFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = NormInv(ma5)
        sampleClose = norm.cdf(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = norm.ppf(ma5.result())
            calculated = holder.result()
            # default accuracy mode: only ~6 decimal places guaranteed
            self.assertAlmostEqual(calculated, expected, 6, "at index {0:d}\n"
                                                            "expected:   {1:f}\n"
                                                            "calculated: {2:f}".format(i, expected, calculated))
        # fullAcc=True gives full double precision
        holder = NormInv(ma5, fullAcc=True)
        sampleClose = norm.cdf(self.sampleClose)
        for i, close in enumerate(sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = norm.ppf(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testCeilFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Ceil(ma5)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.ceil(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testFloorFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Floor(ma5)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = math.floor(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    def testRoundFunction(self):
        ma5 = MovingAverage(5, 'close')
        holder = Round(ma5)
        for i, close in enumerate(self.sampleClose):
            data = {'close': close}
            ma5.push(data)
            holder.push(data)
            expected = round(ma5.result())
            calculated = holder.result()
            self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                                                             "expected:   {1:f}\n"
                                                             "calculated: {2:f}".format(i, expected, calculated))
    # --- copies of pushed accumulators must keep their computed value
    def testArithmeticFunctionsDeepcopy(self):
        data = {'x': 1}
        test = Exp('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.exp(data['x']), copied.value)
        test = Log('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.log(data['x']), copied.value)
        test = Sqrt('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.sqrt(data['x']), copied.value)
        data['x'] = -1.
        test = Pow('x', 2)
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(data['x'] ** 2, copied.value)
        test = Abs('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(abs(data['x']), copied.value)
        test = Sign('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(-1., copied.value)
        data['x'] = 1.
        test = Acos('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.acos(data['x']), copied.value)
        test = Asin('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.asin(data['x']), copied.value)
        test = Acosh('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.acosh(data['x']), copied.value)
        test = Asinh('x')
        test.push(data)
        copied = copy.deepcopy(test)
        self.assertAlmostEqual(math.asinh(data['x']), copied.value)
    # --- pickle round-trip via a temp file for every math wrapper
    def testArithmeticFunctionsPickle(self):
        data = {'x': 1}
        test = Exp('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Log('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Sqrt('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        data['x'] = -1.
        test = Pow('x', 2)
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Abs('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Sign('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        data['x'] = 1.
        test = Acos('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Asin('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Acosh('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
        test = Asinh('x')
        test.push(data)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(test, f)
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
        self.assertAlmostEqual(test.value, pickled.value)
        os.unlink(f.name)
    def testAccumulatorTransform(self):
        # transform over a DataFrame must match pandas' rolling mean
        window = 5
        ma5 = MovingAverage(window, 'close')
        df = pd.DataFrame(self.sampleClose, columns=['close'])
        res = ma5.transform(df, name='my_factor')[window-1:]
        expected = df.rolling(window).mean()[window - 1:]['close']
        self.assertEqual(res.name, 'my_factor')
        np.testing.assert_array_almost_equal(res, expected)
    def testIIFAccumulator(self):
        # IIF(cond, a, b): ternary selection between two fields
        iif = IIF(Latest('rf') > 0, 'close', 'open')
        for i, close in enumerate(self.sampleClose):
            data = {'close': close,
                    'open': self.sampleOpen[i],
                    'rf': self.sampleRf[i]}
            iif.push(data)
            if data['rf'] > 0:
                self.assertAlmostEqual(iif.result(), data['close'])
            else:
                self.assertAlmostEqual(iif.result(), data['open'])
    # --- LaTeX-style string representations
    def testIdentityStr(self):
        s = Identity(2.)
        self.assertEqual('2.0', str(s))
    def testLatestStr(self):
        s = Latest('roe')
        self.assertEqual("''\\text{roe}''", str(s))
    def testExpStr(self):
        s = Exp('roe')
        self.assertEqual("\exp(''\\text{roe}'')", str(s))
    def testLogStr(self):
        s = Log('roe')
        self.assertEqual("\ln(''\\text{roe}'')", str(s))
    def testSqrtStr(self):
        s = Sqrt('roe')
        self.assertEqual("\sqrt{''\\text{roe}''}", str(s))
    def testPowStr(self):
        s = Pow('roe', 3)
        self.assertEqual("''\\text{roe}'' ^ {3.0}", str(s))
    def testAbsStr(self):
        s = Abs('roe')
        self.assertEqual("\\left| ''\\text{roe}'' \\right|", str(s))
    def testSignStr(self):
        s = Sign('roe')
        self.assertEqual("\mathrm{sign}(''\\text{roe}'')", str(s))
    def testAcosStr(self):
        s = Acos('roe')
        self.assertEqual("\mathrm{ACos}(''\\text{roe}'')", str(s))
    def testAcoshStr(self):
        s = Acosh('roe')
        self.assertEqual("\mathrm{ACosh}(''\\text{roe}'')", str(s))
    def testAsinStr(self):
        s = Asin('roe')
        self.assertEqual("\mathrm{ASin}(''\\text{roe}'')", str(s))
    def testAsinhStr(self):
        s = Asinh('roe')
        self.assertEqual("\mathrm{ASinh}(''\\text{roe}'')", str(s))
    def testNormInvStr(self):
        s = NormInv('roe')
        self.assertEqual("\mathrm{NormInv}(''\\text{roe}'', fullAcc=0)", str(s))
    def testNegStr(self):
        s = -Asinh('roe')
        self.assertEqual("-\mathrm{ASinh}(''\\text{roe}'')", str(s))
    def testAddedStr(self):
        s = Latest('x') + Latest('y')
        self.assertEqual("''\\text{x}'' + ''\\text{y}''", str(s))
    def testMinusStr(self):
        s = Latest('x') - Latest('y')
        self.assertEqual("''\\text{x}'' - ''\\text{y}''", str(s))
    def testMultiplyStr(self):
        s = Latest('x') * Latest('y')
        self.assertEqual("''\\text{x}'' \\times ''\\text{y}''", str(s))
        # compound operands are parenthesised
        s = (Latest('x') + Latest('y')) * (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' + ''\\text{y}'') \\times (''\\text{x}'' - ''\\text{y}'')", str(s))
    def testDividedStr(self):
        s = (Latest('x') + Latest('y')) / (Latest('x') - Latest('y'))
        self.assertEqual("\\frac{''\\text{x}'' + ''\\text{y}''}{''\\text{x}'' - ''\\text{y}''}", str(s))
    def testLtOperatorStr(self):
        s = (Latest('x') + Latest('y')) < (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' + ''\\text{y}'') \lt (''\\text{x}'' - ''\\text{y}'')", str(s))
    def testLeOperatorStr(self):
        s = (Latest('x') * Latest('y')) <= (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \le (''\\text{x}'' - ''\\text{y}'')", str(s))
    def testGeOperatorStr(self):
        s = (Latest('x') * Latest('y')) >= (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \ge (''\\text{x}'' - ''\\text{y}'')", str(s))
    def testGtOperatorStr(self):
        s = (Latest('x') * Latest('y')) > (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \gt (''\\text{x}'' - ''\\text{y}'')", str(s))
    def testEqOperatorStr(self):
        s = (Latest('x') * Latest('y')) == (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') = (''\\text{x}'' - ''\\text{y}'')", str(s))
    def testNeqOperatorStr(self):
        s = (Latest('x') * Latest('y')) != (Latest('x') - Latest('y'))
        self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \\neq (''\\text{x}'' - ''\\text{y}'')", str(s))
| mit |
bobmyhill/burnman | examples/example_seismic.py | 2 | 6866 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_seismic
---------------
Shows the various ways to input seismic models
(:math:`V_s, V_p, V_{\\phi}, \\rho`) as a
function of depth (or pressure) as well as different velocity model libraries
available within Burnman:
1. PREM :cite:`dziewonski1981`
2. STW105 :cite:`kustowski2008`
3. AK135 :cite:`kennett1995`
4. IASP91 :cite:`kennett1991`
This example will first calculate or read in a seismic model and plot the
model along the defined pressure range. The example also illustrates how to
import a seismic model of your choice,
here shown by importing AK135 :cite:`kennett1995`.
*Uses:*
* :doc:`seismic`
*Demonstrates:*
* Utilization of library seismic models within BurnMan
* Input of user-defined seismic models
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
import burnman
import warnings
assert burnman_path # silence pyflakes warning
if __name__ == "__main__":
    # The library of 1D seismic reference models shipped with BurnMan.
    models = [burnman.seismic.PREM(), burnman.seismic.STW105(),
              burnman.seismic.AK135(), burnman.seismic.IASP91()]
    colors = ['r', 'b', 'm', 'k']

    # Variables to plot, with one unit label per variable.  (The original
    # units list carried two extra trailing entries that were never used.)
    variables = ['pressure', 'gravity', 'v_p', 'v_s', 'v_phi', 'density']
    units = ['Pa', 'm/s^2', 'm/s', 'm/s', 'm/s', 'kg/m^3']

    plt.figure(figsize=(10, 9))

    # Run through models and variables
    for variable_index in range(len(variables)):
        ax = plt.subplot(3, 2, variable_index + 1)
        for model_index in range(len(models)):
            # Three ways to pick evaluation depths:
            # 1. map from a pressure range:
            #      p = np.arange(1.0e9, 360.0e9, 1.e9)
            #      depths = np.array([models[model_index].depth(pr) for pr in p])
            # 2. explicit depth levels:
            #      depths = np.arange(700e3, 2800e3, 100e3)
            # 3. the depths at which the model itself is specified -- the
            #    preferred option, as it brings out seismic discontinuities
            #    correctly.
            depths = models[model_index].internal_depth_list(mindepth=0,
                                                             maxdepth=6371e3)
            # Evaluate the variable at the given depth levels (linear
            # interpolation).  Not every variable is defined for every
            # model, hence the try/except guard below.
            try:
                with warnings.catch_warnings(record=True) as wrn:
                    values = getattr(models[model_index],
                                     variables[variable_index])(depths)
                    if (len(wrn) == 1) or (len(wrn) == 2):
                        for w in wrn:
                            print(w.message)
                    elif len(wrn) > 2:
                        raise Exception('Unexpected number of warnings')
                plt.plot(depths / 1.e3, values, color=colors[model_index],
                         linestyle='-',
                         label=models[model_index].__class__.__name__)
            except ValueError:
                # The variable is not defined for this model.
                print(variables[variable_index]
                      + ' is not defined for '
                      + models[model_index].__class__.__name__)

        plt.title(variables[variable_index])
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        if variable_index == 3:
            plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        if variable_index > 3:
            plt.xlabel('depth in km')
        plt.ylabel(units[variable_index])
        plt.gca().set_xticks([660, 2891, 5150])
    plt.show()

    # Alternatively one is able to evaluate all the variables for a model in
    # a single call.  (Renamed from 'eval', which shadowed the builtin.)
    seismic_values = models[0].evaluate(['pressure', 'gravity',
                                         'v_p', 'v_s', 'v_phi', 'density'],
                                        models[0].internal_depth_list(
                                            mindepth=-1.e3,
                                            maxdepth=6372.1e3))
    pressure, gravity, v_p, v_s, v_phi, density = seismic_values

    # The following shows how to read in your own model from a file.
    # The model needs to be defined with increasing depth and decreasing
    # radius; in this case the table is read radius-first and reversed.
    class ak135_table(burnman.seismic.SeismicTable):
        def __init__(self):
            burnman.seismic.SeismicTable.__init__(self)
            # In format: radius, pressure, density, v_p, v_s
            table = burnman.tools.read_table(
                "input_seismic/ak135_lowermantle.txt")
            table = np.array(table)
            self.table_radius = table[:, 0][::-1]
            self.table_pressure = table[:, 1][::-1]
            self.table_density = table[:, 2][::-1]
            self.table_vp = table[:, 3][::-1]
            self.table_vs = table[:, 4][::-1]
            # self.table_depth needs to be defined and needs to be increasing
            self.table_depth = self.earth_radius - self.table_radius

    ak = ak135_table()
    # specify where we want to evaluate, here we map from pressure to depth
    depths = np.linspace(700e3, 2800e3, 40)
    # now evaluate everything at the given depths levels (using interpolation)
    pressures, density, v_p, v_s, v_phi = ak.evaluate(
        ['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)

    # plot vs and vp and v_phi (note that v_phi is computed!)
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.title('ak135')
    plt.plot(depths / 1.e3, v_p / 1.e3, '+-r', label='v_p')
    plt.plot(depths / 1.e3, v_s / 1.e3, '+-b', label='v_s')
    plt.plot(depths / 1.e3, v_phi / 1.e3, '--g', label='v_phi')
    plt.legend(loc='lower left')
    plt.xlabel('depth in km')
    plt.ylabel('km/s')

    # plot pressure, density vs depth:
    plt.subplot(1, 2, 2)
    plt.title('ak135')
    plt.plot(depths / 1.e3, pressures / 1.e9, '-r', label='pressure')
    plt.ylabel('GPa')
    plt.xlabel('depth in km')
    plt.legend(loc='upper left')
    plt.twinx()
    plt.ylabel('g/cc')
    plt.plot(depths / 1.e3, density / 1.e3, '-b', label='density')
    plt.legend(loc='lower right')
    plt.show()
| gpl-2.0 |
azvoleff/pyabm | pyabm/rcsetup.py | 1 | 29166 | # Copyright 2009-2013 Alex Zvoleff
#
# This file is part of the pyabm agent-based modeling toolkit.
#
# pyabm is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pyabm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pyabm. If not, see <http://www.gnu.org/licenses/>.
#
# See the README.rst file for author contact information.
"""
Sets up parameters for a model run. Used to read in settings from any provided
rc file, and set default values for any parameters that are not provided in the
rc file.
.. note:: The rcsetup.py functionality used in ChitwanABM was originally based
off of the the rcsetup.py module used in matplotlib.
"""
from __future__ import division
import os
import sys
import tempfile
import copy
import logging
import inspect
from pkg_resources import resource_string
import numpy as np
logger = logging.getLogger(__name__)
class KeyError(Exception):
    # NOTE(review): this class shadows the *builtin* KeyError for the rest of
    # this module.  A builtin KeyError raised by a failed dict lookup is NOT
    # an instance of this class, so `except KeyError` clauses later in the
    # module catch only this custom type -- confirm this is intended.
    def __init__(self, value):
        # The offending parameter name/value to report.
        self.parameter = value
    def __str__(self):
        return repr(self.parameter)
def validate_float(s):
    """Convert s to a float or raise ValueError.

    String inputs are passed through eval() first, so simple arithmetic
    expressions such as '1/3' are accepted.  (eval on rc values is a known
    hazard; acceptable here only because rc files are trusted local config.)
    """
    try:
        # Bug fix: the original used the legacy form 'except NameError,
        # ValueError:', which catches only NameError (binding it to the name
        # ValueError) -- so a genuine ValueError from float() escaped uncaught.
        if isinstance(s, str):
            return float(eval(s))
        return float(s)
    except (NameError, SyntaxError, ValueError, TypeError):
        raise ValueError('Could not convert "%s" to float'%s)
def validate_int(s):
    """Convert s to an int or raise ValueError.

    String inputs are passed through eval() first (trusted rc config only).
    Values with a fractional part (e.g. '4.5') are rejected.
    """
    try:
        value = eval(s) if isinstance(s, str) else s
        ret = int(value)
    except (NameError, SyntaxError, ValueError, TypeError):
        # Bug fix: the original caught only NameError, so e.g. int('') or a
        # malformed expression escaped with an uncaught ValueError/SyntaxError.
        raise ValueError('Could not convert "%s" to int'%s)
    # Reject values that lose information in the int() truncation.  (The
    # original compared against float(s) on the raw string, which itself
    # raised an uncaught ValueError for expression strings like '4/2'.)
    if ret != float(value):
        raise ValueError('"%s" is not an int'%s)
    return ret
def validate_string(s):
    """Convert s to a string, stripping surrounding quotes and spaces.

    rc values such as "'foo'" or '" bar "' come back as plain foo / bar.
    """
    # Dead code removed: the original wrapped str() in try/except NameError
    # (str() never raises NameError) and re-checked `ret != str(s)`, which is
    # always False immediately after `ret = str(s)`.
    ret = s if isinstance(s, str) else str(s)
    return ret.strip("\"' ")
def validate_string_list(s):
    """Convert s to a list of cleaned strings.

    Accepts either an actual list, or a string such as "('a', 'b')" which is
    stripped of brackets/quotes and split on commas.  Each element is cleaned
    via validate_string().
    """
    try:
        if type(s) != list:
            s = s.strip('( )\'"')
            s = s.split(',')
            s = list(s)
    except AttributeError:
        # Bug fix: non-string, non-list inputs (e.g. a tuple or an int) used
        # to raise an uncaught AttributeError from .strip(); the original
        # caught only NameError, which str methods never raise.
        raise TypeError('Could not convert "%s" to list of strings'%s)
    s = [validate_string(item) for item in s]
    return s
def validate_unit_interval(s):
    "Checks that s is a number between 0 and 1, inclusive, or raises an error."
    value = validate_float(s)
    if not 0 <= value <= 1:
        raise ValueError('"%s" is not on the closed unit interval [0,1]'%s)
    return value
def validate_readable_file(s):
    """Checks that a file exists and is readable."""
    if not isinstance(s, str):
        raise TypeError("%s is not a readable file"%s)
    if not os.path.exists(s):
        raise IOError("%s does not exist"%s)
    if not os.path.isfile(s):
        raise IOError("%s is not a readable file"%s)
    # Probe readability by actually reading one line.
    try:
        with open(s, 'r') as fh:
            fh.readline()
    except OSError:
        raise OSError("error reading file %s"%s)
    return s
def validate_git_binary(s):
    """Return a validated path to the git binary, or None when disabled."""
    if s.lower() != 'none':
        return validate_readable_file(s)
    logger.warn("git version control features disabled. Specify valid git binary path in your pyabmrc to enable.")
    return None
def validate_Rscript_binary(s):
    """Return a validated path to the Rscript binary, or None when disabled."""
    if s.lower() != 'none':
        return validate_readable_file(s)
    logger.warn("Rscript access disabled. Specify valid Rscript binary path in your pyabmrc to enable.")
    return None
def validate_batchrun_python_binary(s):
    """Return a validated path to the python binary used for parallel batch
    runs, or None when parallel features are disabled."""
    if s.lower() != 'none':
        return validate_readable_file(s)
    logger.warn("Parallel features are disabled. Specify valid python binary path in your pyabmrc to enable.")
    return None
def validate_tail_binary(s):
    """Return a validated path to a 'tail'-like binary, or None when log
    tailing is disabled."""
    if s.lower() != 'none':
        return validate_readable_file(s)
    logger.warn("Log 'tailing' disabled. Specify valid tail binary path (or path to equivalent program) in your pyabmrc to enable live tailing of ABM logs.")
    return None
def validate_readable_file_warning(s):
    """
    Checks that a file exists and is readable. Only logs a warning if the
    file is not readable (does not raise error).
    """
    if not isinstance(s, str):
        logger.warn("%s is not a readable file"%s)
        return s
    if not os.path.exists(s):
        logger.warn("%s does not exist"%s)
        return s
    try:
        handle = open(s, 'r')
        handle.readline()
        handle.close()
    except IOError:
        logger.warn("error reading file %s"%s)
    return s
def validate_readable_dir(s):
    """
    Checks that a directory exists and is readable. Fails if the directory
    does not exist or if s is not a string.
    """
    if not isinstance(s, str):
        raise TypeError("%s is not a readable directory"%s)
    if not os.path.isdir(s):
        raise TypeError("%s is not a directory"%s)
    try:
        # Readability probe only; the listing itself is discarded.
        os.listdir(s)
    except OSError:
        # Bug fix: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise OSError("cannot read directory %s"%s)
    return s
def validate_writable_dir(s):
    """
    Checks that a directory exists and is writable. Fails if the directory
    does not exist or if s is not a string.
    """
    if not isinstance(s, str):
        raise TypeError("%s is not a writable directory"%s)
    if not os.path.exists(s):
        raise IOError("%s does not exist"%s)
    try:
        t = tempfile.TemporaryFile(dir=s)
        # Bug fix: TemporaryFile opens in binary mode ('w+b'), so writing the
        # str '1' raised an uncaught TypeError on Python 3; write bytes.
        t.write(b'1')
        t.close()
    except OSError:
        raise OSError("cannot write to directory %s"%s)
    return s
class validate_nseq_float:
    """Callable validator that coerces its input to a sequence of exactly n
    floats.  Accepts either a comma-separated string like '(1.5, 2)' or an
    actual list/tuple."""

    def __init__(self, n):
        self.n = n

    def __call__(self, s):
        'return a seq of n floats or raise'
        if type(s) is str:
            parts = s.strip('( )').split(',')
            if len(parts) != self.n:
                raise ValueError('You must supply exactly %d comma separated values'%self.n)
            try:
                return [float(part) for part in parts]
            except ValueError:
                raise ValueError('Could not convert all entries to floats')
        assert type(s) in (list, tuple)
        if len(s) != self.n:
            raise ValueError('You must supply exactly %d values'%self.n)
        return [float(item) for item in s]
class validate_nseq_int:
    """Callable validator that coerces its input to a sequence of exactly n
    ints.  Accepts either a comma-separated string like '(1, 2)' or an
    actual list/tuple."""

    def __init__(self, n):
        self.n = n

    def __call__(self, s):
        'return a seq of n ints or raise'
        if type(s) is str:
            parts = s.strip('( )').split(',')
            if len(parts) != self.n:
                raise ValueError('You must supply exactly %d comma separated values'%self.n)
            try:
                return [int(part) for part in parts]
            except ValueError:
                raise ValueError('Could not convert all entries to ints')
        assert type(s) in (list, tuple)
        if len(s) != self.n:
            raise ValueError('You must supply exactly %d values'%self.n)
        return [int(item) for item in s]
def validate_boolean(s):
    """Coerce s to a boolean: passes True/False through, and converts the
    (case-insensitive) strings 'true'/'false'.  Raises TypeError otherwise."""
    if s in [True, False]:
        return s
    if isinstance(s, str):
        if s.lower() == 'true':
            return True
        if s.lower() == 'false':
            return False
    # Bug fix: non-string, non-boolean inputs (e.g. the int 2) used to raise
    # an uncaught AttributeError from .lower() instead of the advertised
    # TypeError.
    raise TypeError("%s is not a boolean"%s)
def validate_time_units(s):
    """Validate that s names a supported unit of time; returns it lowercased."""
    if not isinstance(s, str):
        raise TypeError("%s is not a valid unit of time"%s)
    unit = s.lower()
    if unit not in ('months', 'years', 'decades'):
        raise ValueError("%s is not a valid unit of time"%s)
    return unit
def validate_random_seed(s):
    """Return None for an unset seed (None or the string 'None'); otherwise
    coerce the seed to an int."""
    if s == 'None' or s is None:
        return None
    return validate_int(s)
class validate_probability:
    """
    Validates a probability specified as a dictionary where each key is a tuple
    specifying the interval to which the probability applies (in
    probability_time_units). The interval tuple is specified as::

        [lower, upper)

    (closed interval on the lower bound, open interval on the upper), and the
    value specified for each inteval tuple key is the probability for that
    interval.

    The 'min', 'max' values passed to the validate_probability function give
    the minimum (inclusive) and maximum values (exclusive) for which
    probabilities must be specified. validate_probability will check that
    probabilities are specified for all values of t between this minimum and
    maximum value, including the minimum value ('min') in [min, max) and up to
    but excluding the maximum value 'max'.

    This function validates the probabilities lie on the unit interval, and
    then returns a dictionary object where there is a key for each age value in
    the interval specified. Therefore,::

        {(0,2):.6, (2,5):.9}

    would be converted to::

        {0:.6, 1:.6, 2:.9, 3:.9, 4:.9}
    """
    # NOTE: Python 2-only code (iteritems/xrange); left byte-compatible.
    def __init__(self, min, max):
        # Overall [min, max) interval that the supplied intervals must cover.
        # (The parameter names shadow the builtins min/max.)
        self.min = min
        self.max = max
    def __call__(self, s):
        error_msg = """Invalid probability parameter dictionary: %s
probabilities must be specified in a dictionary of key, value pairs in the
following format:

    (lower_limit, upper_limit) : probability

probabilities apply to the interval [lower_limit, upper_limit), including the
lower limit, and excluding the upper limit. The units in which the
lower and upper limits are specified should be consistent with the
units of time specified by the probability_time_units rc parameter."""
        # String inputs (from an rc file) are eval'ed into a dict; rc files
        # are trusted local configuration.
        try:
            if type(s) == str:
                input = eval(s)
            else:
                input = s
        except TypeError:
            raise TypeError(error_msg%(s))
        except SyntaxError:
            raise SyntaxError(error_msg%(s))
        if type(input) != dict:
            raise SyntaxError(error_msg%(s))
        probability_dict = {}
        key_converter_tuple = validate_nseq_int(2)
        for item in input.iteritems():
            # First convert the probability interval tuple (item[0]) from a string
            # to a length 2 tuple of ints
            # Validate that key is a length 2 tuple
            key = key_converter_tuple(item[0])
            # Now process the key and values, and check that they fall within
            # the specified overall interval for this probability type
            lower_lim, upper_lim = validate_int(key[0]), validate_int(key[1])
            if lower_lim > upper_lim:
                raise ValueError("lower_lim > upper_lim for probability dictionary key '(%s, %s)'."%(key))
            elif lower_lim == upper_lim:
                raise ValueError("lower_lim = upper_lim for probability dictionary key '(%s, %s)'."%(key))
            probability = validate_unit_interval(item[1])
            # Expand the interval into one entry per time unit, rejecting
            # overlapping intervals.
            for t in xrange(lower_lim, upper_lim):
                if t in probability_dict:
                    raise ValueError("probability is specified twice for dictionary key '%s'."%(t))
                probability_dict[t] = probability
        # Ensure every expanded key lies inside the overall [min, max) window.
        for key in probability_dict.keys():
            if key < self.min or key >= self.max:
                raise ValueError("A probability is given for a time outside the \
specified overall probability interval.\nA probability is given for time %s, but the overall \
probability interval is [%s, %s)."%(key, self.min, self.max))
        return probability_dict
def validate_prob_dist(s):
    """
    Validates a probability distribution specified as a length-two tuple:
    a list of bin limits and a list of per-bin probabilities, with one more
    bin limit than there are probabilities (to close the interval).
    """
    error_msg = """
Invalid probability distribution parameter tuple: %s

Probability distributions must be specified in a length two tuple
in the following format:

    ([a, b, c, d], [1, 2, 3])

where a, b, c, and d are bin limits, and 1, 2, and 3 are the probabilities
assigned to each bin. Notice one more bin limit must be specified than the
number of probabilities given (to close the interval).
"""
    # String inputs (from an rc file) are eval'ed; rc files are trusted
    # local configuration.
    try:
        if type(s) == str:
            prob_dist_tuple = eval(s)
        else:
            prob_dist_tuple = s
    except TypeError:
        raise TypeError(error_msg%(s))
    except SyntaxError:
        raise SyntaxError(error_msg%(s))
    if type(prob_dist_tuple) != tuple:
        raise SyntaxError(error_msg%(s))
    if not (len(prob_dist_tuple[0]) == 2 and type(prob_dist_tuple[1]) == int) and \
            (len(prob_dist_tuple[0]) != (len(prob_dist_tuple[1]) + 1)):
        # The first clause of the above if statement is to catch the
        # case where the probability distribution is over a single
        # interval (a two-element bin-limit list with a single scalar
        # probability).
        # Bug fix: the original message interpolated an undefined name
        # 'key', so this path raised NameError instead of the intended
        # SyntaxError.
        raise SyntaxError("Length of probability tuple must be 1 less than the length of the bin limit tuple - error reading %s"%(s,))
    return prob_dist_tuple
def validate_time_bounds(values):
    """Converts and validates the start and stop time for the model. Checks to
    ensure consistency, and rejects unlikely inputs, like years < minyear or >
    maxyear."""
    # Bug fix: 'error_msg' was referenced on every error path below but was
    # never defined, so malformed input raised NameError rather than a
    # helpful ValueError.
    error_msg = "Model start/stop times must be specified as " \
                "'(start_year, start_month), (stop_year, stop_month)' " \
                "- error reading %s"%(values,)
    minyear, maxyear = 1990, 2201
    values = values.replace(' ', '')
    values = values.split('),(')
    if len(values) > 2:
        raise ValueError(error_msg)
    bounds = []
    for date in values:
        date = date.strip('()').split(',')
        bound = []
        if len(date) > 2:
            raise ValueError(error_msg)
        for item in date:
            try:
                bound.append(validate_int(item))
            except ValueError as msg:
                raise ValueError("Invalid date. In model start/stop time, a [year, month] date of \
%s is given. %s"%(date, msg))
        if len(bound) == 2:
            # len(bound)=2 means a year and month are specified, as (year,
            # month). So validate that the second item in bound, the number of
            # the month, is between 1 and 12
            if bound[1] < 1 or bound[1] > 12:
                raise ValueError("In model start/stop time, a month number of \
%s is given. The month number must be an integer >=1 and <= 12"%bound[1])
        if bound[0] < minyear or bound[0] > maxyear:
            # These year limits are to avoid accidentally incorrect entries. If
            # the model is actually supposed to be run beyond these limits,
            # these limits on the max/min year can be changed.
            raise ValueError("In model start/stop time, a year of \
%s is given. The year must be an integer >=%sand <= %s"%(bound[0], minyear, maxyear))
        bounds.append(bound)
    if len(bounds[0])==1 or len(bounds[1])==1:
        raise ValueError("In model start/stop time, no month is specified.")
    # Check that start and stop dates are valid:
    if (bounds[0][0] == bounds[1][0] and bounds[0][1] >= bounds[1][1]) or \
            (bounds[0][0] > bounds[1][0]):
        raise ValueError("Specified model start time is >= model stop time.")
    return bounds
def novalidation(s):
    """Identity validator: returns the object unchanged (used in testing)."""
    return s
def _get_home_dir():
"""
Find user's home directory if possible. Otherwise raise error.
see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('Error finding user home directory: \
please define environment variable $HOME')
class RcParams(dict):
    """
    A dictionary object including validation
    """
    # NOTE: Python 2-only code (iteritems, 'except E, name' syntax).
    def __init__(self, validation=True, *args):
        self._validation = validation
        dict.__init__(self, *args)
        # Maps key -> converter/validator callable; filled by setup_validation.
        self._validation_dict = None
        # self.original_value stores the unconverted strings representing the
        # originally input values (prior to conversion). This allows printing
        # to an rc file the original values given by a user or rc file without
        # running into problems with errors due to machine precision while
        # doing floating point -> string -> floating point conversions
        self.original_value = {}
    def setup_validation(self, rcparams_defaults_dict):
        # Drop the default values; only the per-key converters are kept.
        self._validation_dict = dict([(key, converter) for key, (default,
            converter) in rcparams_defaults_dict.iteritems()])
    def __setitem__(self, key, val):
        self.original_value[key] = val
        if self._validation:
            try:
                # Convert/validate, then store the converted value.
                cval = self._validation_dict[key](val)
                dict.__setitem__(self, key, cval)
            # NOTE(review): the module defines a custom KeyError class that
            # shadows the builtin; a builtin KeyError from the dict lookup
            # above is NOT an instance of it, so this handler may never fire
            # for unknown keys -- confirm against the module's KeyError class.
            except KeyError, msg:
                raise KeyError('%s is not a valid rc parameter. \
See rcParams.keys() for a list of valid parameters. %s'%(key, msg))
        else:
            dict.__setitem__(self, key, val)
    def validate_items(self):
        # Re-run validation over every originally supplied (raw) value.
        for key, val in self.original_value.iteritems():
            try:
                cval = self._validation_dict[key](val)
                dict.__setitem__(self, key, cval)
            except KeyError, msg:
                logger.error('problem processing %s rc parameter. %s'%(key, msg))
def parse_rcparams_defaults(module_name):
    """
    Parses the pyabm rcparams.defaults file as well as the rcparams.defaults
    file (if any) for the calling module. Returns a list of tuples:

        (filename, linenum, key, value, comment)
    """
    # NOTE: Python 2-only code (xrange).
    parsed_lines = []
    key_dict = {}
    try:
        # Read the packaged 'rcparams.default' resource for this module.
        rcparams_lines = resource_string(module_name, 'rcparams.default').splitlines()
    except IOError:
        raise IOError('ERROR: Could not open rcparams.defaults file in %s'%module_name)
    logger.debug("Loading rcparams.defaults from %s"%module_name)
    # Skip the preamble; parsing starts after this sentinel line.  The loop
    # variable deliberately persists after the break (used below).
    for preamble_linenum in xrange(1, len(rcparams_lines)):
        if rcparams_lines[preamble_linenum] == "###***START OF RC DEFINITION***###":
            break
    for linenum in xrange((preamble_linenum + 1), len(rcparams_lines)):
        # Remove linebreak
        line = rcparams_lines[linenum]
        # Pull out key, and strip single quotes, double quotes and blank
        # spaces
        comment = ''.join(line.partition("#")[1:3])
        line = ''.join(line.partition("#")[0])
        key = ''.join(line.partition(":")[0].strip("\'\" "))
        # Now pull out value and converter; format is "key : value | converter"
        value_validation_tuple = line.partition(':')[2].partition("#")[0].strip(", ")
        value = value_validation_tuple.rpartition("|")[0].strip("[]\"\' ")
        converter = value_validation_tuple.rpartition("|")[2].strip("[]\"\' ")
        if key != '':
            if key in key_dict:
                logger.warn("Duplicate values for %s are provided in %s rcparams.default."%(key, module_name))
            # Convert 'converter' from a string to a reference to the
            # validation object.  (eval of trusted, packaged config only.)
            converter = eval(converter)
            key_dict[key] = (value, converter)
        parsed_lines.append((module_name, linenum, key, value, comment))
    return parsed_lines, key_dict
def read_rc_file(default_params, fname=os.path.basename(os.getcwd()) +'rc'):
    """
    Returns an RcParams instance containing the the keys / value combinations
    read from an rc file. The rc file name defaults to the module name plus 'rc'.

    NOTE: the default fname is computed once at import time from the current
    working directory.
    """
    # NOTE: Python 2-only code (the file() builtin, 'except E, name' syntax).
    rcfile_params = RcParams(validation=True)
    rcfile_params.setup_validation(default_params)
    cnt = 0
    for line in file(fname):
        cnt += 1
        # Strip trailing comments and whitespace; skip blank lines.
        strippedline = line.split('#',1)[0].strip()
        if not strippedline: continue
        # Lines are "key : value"; split on the first colon only.
        tup = strippedline.split(':',1)
        if len(tup) !=2:
            logger.warn('illegal line #%d in file "%s"'%(cnt, fname))
            continue
        key, val = tup
        key = key.strip()
        val = val.strip()
        if key in rcfile_params:
            logger.warn('duplicate key in file "%s", line #%d'%(fname,cnt))
        # Validate the values read in from the rc file
        try:
            rcfile_params[key] = val # try to convert to proper type or raise
        except Exception, msg:
            logger.warning('Failure while reading rc parameter %s on line %d in %s: %s. Reverting to default parameter value.'%(key, cnt, fname, msg))
    return rcfile_params
class rc_params_management():
    """
    This class manages the RcParams instance used by pyabm and shared by any
    calling modules.
    """
    # NOTE: Python 2-only code (iteritems/iterkeys, 'except E, name' syntax).
    def __init__(self):
        self._initialized = False
        self._validated = False
        self._rcParams = None
        self._default_parsed_lines = None
        self._default_rcparams_dict = None
        # Always seed this instance with pyabm's own rcparams.default.
        self.load_default_params(__name__)
    def load_default_params(self, module_name):
        """
        Load the rcparams_defaults into a dictionary, which will be used to tie
        keys to converters in the definition of the RcParams class.

        The function can be called repeatedly to load new defaults (from
        different modules who share the same rc_params_management instance, for
        example). If the function is called more than once, any default
        parameters that are not explicitly overwritten will be left unchanged.
        """
        default_parsed_lines, default_rcparams_dict = parse_rcparams_defaults(module_name)
        if self._default_parsed_lines == None and self._default_rcparams_dict == None:
            self._default_parsed_lines = default_parsed_lines
            self._default_rcparams_dict = default_rcparams_dict
        else:
            # Subsequent calls merge on top of earlier defaults.
            self._default_parsed_lines.extend(default_parsed_lines)
            self._default_rcparams_dict.update(default_rcparams_dict)
        # Convert the rcparams_defaults dictionary into an RcParams
        # instance. This process will also validate that the values in
        # rcparams_defaults are valid by using the validation function
        # specified in rcparams_defaults to convert each parameter value.
        if self._rcParams == None:
            self._rcParams = RcParams(validation=False)
        self._rcParams.setup_validation(self._default_rcparams_dict)
        # Validation is toggled off while the raw default strings are stored;
        # validate_params() converts them later.
        self._rcParams._validation = False
        for key, (default, converter) in default_rcparams_dict.iteritems():
            try:
                self._rcParams[key] = default
            except Exception, msg:
                raise Exception("ERROR: Problem processing rcparams.default key '%s'. %s"%(key, msg))
        self._rcParams._validation = True
    def is_initialized(self):
        # True once initialize() has completed.
        return self._initialized
    def is_validated(self):
        # True once validate_params() has run over the stored raw values.
        return self._validated
    def validate_params(self):
        self._rcParams.validate_items()
        self._validated = True
    def get_params(self):
        if not self.is_initialized():
            logger.warning("rcparams not yet initialized")
        if not self.is_validated():
            logger.warning("rcparams not yet validated - must call validate_params")
        return self._rcParams
    def initialize(self, module_name, custom_rc_file=None):
        """
        Loads rcParams by first starting with the default parameter values from
        rcparams.default (already stored in the attribute 'default_params', and
        then by checking for an rc file in:

            1) the path specified by the 'custom_rc_file' parameter
            2) the current working directory
            3) the user's home directory
            4) the directory in which the calling module is located

        The script searches in each of these locations, in order, and reads the
        first and only the first rc file that is found. If a rc file is found,
        the default_params are updated with the values from the rc file. The
        rc_params are then returned.

        The name of the rc file can be specified as a parameter ``rcfile_name``
        to the script. If not given, the rc file name defaults to the name of
        the calling module passed as an input parameter, with an 'rc' suffix.

        Note that this function can be called more than once, in order to
        initialize different sets of parameters from different rc files.
        """
        rc_file_params = None
        rc_file_paths = [os.getcwd(), _get_home_dir(), sys.path[0]]
        rcfile_name = os.path.split(module_name)[-1] + 'rc'
        rc_file_paths = [os.path.join(path, rcfile_name) for path in rc_file_paths]
        if custom_rc_file != None: rc_file_paths = [custom_rc_file] + rc_file_paths
        # Use the first rc file found, in priority order.
        for rc_file_path in rc_file_paths:
            if os.path.exists(rc_file_path):
                logger.info("Loading custom rc_file %s"%rc_file_path)
                rc_file_params = read_rc_file(self._default_rcparams_dict, rc_file_path)
                break
        self._rcParams._validation = False
        # If an rc file was found, update the default_params with the values
        # from that rc file.
        if rc_file_params != None:
            for key in rc_file_params.iterkeys():
                self._rcParams[key] = rc_file_params.original_value[key]
                logger.info("custom '%s' parameter loaded"%key)
        else:
            logger.info("no rc file found. Using parameters from rcparams.default")
        # Now run the validation on all the items in the default_params
        # instance (as values read from rcparams.defaults have not yet been
        # validated).
        self.validate_params()
        self._rcParams._validation = True
        self._initialized = True
        # Check if a random_seed was loaded from the rcfile. If not (if
        # random_seed==None), then choose a random random_seed, and store it in
        # rcParams so that it can be written to a file at the end of model
        # runs, and saved for later reuse (for testing, etc.).
        if self._rcParams['random_seed'] == None:
            # Seed the random_seed with a known random integer, and save the seed for
            # later reuse (for testing, etc.).
            self._rcParams['random_seed'] = int(10**8 * np.random.random())
        np.random.seed(int(self._rcParams['random_seed']))
        logger.debug("Random seed set to %s"%int(self._rcParams['random_seed']))
    def write_RC_file(self, outputFilename, docstring=None):
        """
        Write default rcParams to a file after updating them from the currently
        loaded rcParams dictionary. Any keys in the updated dictionary that are
        not already defined in the default parameter files (rcparams.defaults)
        used to build the rcParams dictionary are ignored (as read_rc_file
        would reject unknown keys anyways when the rc file is read back in).
        """
        # The default string used as the header of rc_files (if an alternative one
        # is not provided).
        default_RCfile_docstring = """# Default values of parameters for the Chitwan Valley Agent-based Model. Values
# are read in to set defaults prior to initialization of the model by the
# runmodel script.
#
# Alex Zvoleff, azvoleff@conservation.org"""
        if self._default_parsed_lines == None:
            logger.warning("rcparams_defaults have not yet been read into this rc_params_management instance")
        # Finally, write rc file to outputFilename.
        outFile = open(outputFilename, "w")
        if docstring == None:
            outFile.writelines("%s\n\n"%(default_RCfile_docstring))
        else:
            outFile.writelines("%s\n\n"%(docstring))
        for (module, linenum, key, value, comment) in self._default_parsed_lines:
            if key == "" and value == "":
                outFile.write("%s\n"%(comment)) # if comment is blank, just write a newline to the file
                continue
            elif comment != '':
                # If there is a comment at the end of a line with a key : value
                # pair, ensure the comment is preceded by a blank space
                comment = ' ' + comment
            # Update the keyvalues from any rcparams instance handed to the
            # write_RC_file function:
            if key in self._rcParams:
                value = self._rcParams[key]
            outFile.write("%s : %s%s\n"%(key, value, comment))
        outFile.close()
unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/lib/tests/test_latextools.py | 4 | 3811 | # encoding: utf-8
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from unittest.mock import patch
import nose.tools as nt
from IPython.lib import latextools
from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib
from IPython.utils.process import FindCmdError
def test_latex_to_png_dvipng_fails_when_no_cmd():
    """
    `latex_to_png_dvipng` should return None when there is no required command
    """
    # Nose generator test: yield one check per required external command.
    for missing_cmd in ('latex', 'dvipng'):
        yield (check_latex_to_png_dvipng_fails_when_no_cmd, missing_cmd)
def check_latex_to_png_dvipng_fails_when_no_cmd(command):
    """Patch find_cmd so *command* appears missing; expect None result."""
    def fake_find_cmd(arg):
        # Simulate only the one command being unavailable.
        if arg == command:
            raise FindCmdError
    with patch.object(latextools, "find_cmd", fake_find_cmd):
        result = latextools.latex_to_png_dvipng("whatever", True)
    nt.assert_equal(result, None)
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_dvipng_runs():
    """
    Test that latex_to_png_dvipng just runs without error.
    """
    def mock_kpsewhich(filename):
        # latex_to_png_dvipng probes for breqn.sty; pretend it is absent.
        nt.assert_equal(filename, "breqn.sty")
        return None

    # Exercise the display form ($$...$$, wrap=False) and the auto-wrapped
    # form, first with the real kpsewhich and then with the mock.
    for (s, wrap) in [(u"$$x^2$$", False), (u"x^2", True)]:
        yield (latextools.latex_to_png_dvipng, s, wrap)

        with patch.object(latextools, "kpsewhich", mock_kpsewhich):
            yield (latextools.latex_to_png_dvipng, s, wrap)
@skipif_not_matplotlib
def test_latex_to_png_mpl_runs():
    """
    Test that latex_to_png_mpl just runs without error.
    """
    def mock_kpsewhich(filename):
        # The matplotlib backend also probes for breqn.sty; report it absent.
        nt.assert_equal(filename, "breqn.sty")
        return None

    # Exercise both the pre-wrapped and auto-wrapped forms, with the real
    # kpsewhich and then the mock.
    for (s, wrap) in [("$x^2$", False), ("x^2", True)]:
        yield (latextools.latex_to_png_mpl, s, wrap)

        with patch.object(latextools, "kpsewhich", mock_kpsewhich):
            yield (latextools.latex_to_png_mpl, s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
    # Rendering "$x^2$" should yield an inline base64 PNG data URI.  Every
    # PNG starts with the bytes \x89PNG..., which base64-encode to 'iVBOR'.
    img = latextools.latex_to_html("$x^2$")
    nt.assert_in("data:image/png;base64,iVBOR", img)
def test_genelatex_no_wrap():
    """
    Test genelatex with wrap=False.
    """
    def mock_kpsewhich(filename):
        # With wrap=False genelatex must not look up breqn.sty at all.
        assert False, ("kpsewhich should not be called "
                       "(called with {0})".format(filename))

    # The generated document must match this exact preamble + body.
    with patch.object(latextools, "kpsewhich", mock_kpsewhich):
        nt.assert_equal(
            '\n'.join(latextools.genelatex("body text", False)),
            r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}''')
def test_genelatex_wrap_with_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is installed.
    """
    def mock_kpsewhich(filename):
        # Report breqn.sty as installed so the dmath* environment is used.
        nt.assert_equal(filename, "breqn.sty")
        return "path/to/breqn.sty"

    with patch.object(latextools, "kpsewhich", mock_kpsewhich):
        nt.assert_equal(
            '\n'.join(latextools.genelatex("x^2", True)),
            r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}''')
def test_genelatex_wrap_without_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is not installed.
    """
    def mock_kpsewhich(filename):
        # Report breqn.sty as missing so genelatex falls back to $$...$$.
        nt.assert_equal(filename, "breqn.sty")
        return None

    with patch.object(latextools, "kpsewhich", mock_kpsewhich):
        nt.assert_equal(
            '\n'.join(latextools.genelatex("x^2", True)),
            r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}''')
| bsd-2-clause |
meetshah1995/pytorch-semseg | ptsemseg/loader/camvid_loader.py | 1 | 4256 | import os
import collections
import torch
import numpy as np
import scipy.misc as m
import matplotlib.pyplot as plt
from torch.utils import data
from ptsemseg.augmentations import Compose, RandomHorizontallyFlip, RandomRotate
class camvidLoader(data.Dataset):
    """CamVid semantic-segmentation dataset loader.

    Expects the SegNet-style CamVid layout: images under ``<root>/<split>/``
    and label images under ``<root>/<split>annot/`` with identical file names.

    Parameters
    ----------
    root : str
        Path to the dataset root directory.
    split : str
        One of "train", "test" or "val".
    is_transform : bool
        If True, resize/normalize images in ``__getitem__`` and return tensors.
    img_size : [int, int] or None
        Target (height, width) used by ``transform``. Defaults to [360, 480].
    augmentations : callable or None
        Joint image/label augmentation applied before ``transform``.
    img_norm : bool
        If True, scale pixel values into [0, 1] after mean subtraction.
    test_mode : bool
        If True, skip scanning the split directories (useful when only the
        transform/decode utilities are needed).
    """

    def __init__(
        self,
        root,
        split="train",
        is_transform=False,
        img_size=None,
        augmentations=None,
        img_norm=True,
        test_mode=False,
    ):
        self.root = root
        self.split = split
        # BUGFIX: img_size used to be accepted but silently ignored
        # (hard-coded to [360, 480]); honor the caller's value when given.
        self.img_size = [360, 480] if img_size is None else img_size
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.test_mode = test_mode
        # Per-channel BGR mean (Caffe/VGG convention), subtracted in transform().
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.n_classes = 12
        self.files = collections.defaultdict(list)
        if not self.test_mode:
            for split in ["train", "test", "val"]:
                file_list = os.listdir(root + "/" + split)
                self.files[split] = file_list

    def __len__(self):
        """Number of samples in the configured split."""
        return len(self.files[self.split])

    def __getitem__(self, index):
        """Load image/label pair ``index``, applying augmentations/transforms."""
        img_name = self.files[self.split][index]
        img_path = self.root + "/" + self.split + "/" + img_name
        # Labels live in a sibling "<split>annot" directory.
        lbl_path = self.root + "/" + self.split + "annot/" + img_name

        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)

        lbl = m.imread(lbl_path)
        # int8 is sufficient: only 12 class ids are used.
        lbl = np.array(lbl, dtype=np.int8)

        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)

        if self.is_transform:
            img, lbl = self.transform(img, lbl)

        return img, lbl

    def transform(self, img, lbl):
        """Resize, mean-subtract, (optionally) scale and tensorize a pair."""
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))  # uint8 with RGB mode
        img = img[:, :, ::-1]  # RGB -> BGR to match the BGR channel means
        img = img.astype(np.float64)
        img -= self.mean
        if self.img_norm:
            # Resize scales images from 0 to 255, thus we need
            # to divide by 255.0
            img = img.astype(float) / 255.0
        # NHWC -> NCHW
        img = img.transpose(2, 0, 1)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl

    def decode_segmap(self, temp, plot=False):
        """Map a 2-D array of class ids to an RGB image (floats in [0, 1]).

        NOTE(review): the ``plot`` flag is currently unused; plotting is done
        by callers (see the __main__ demo).
        """
        Sky = [128, 128, 128]
        Building = [128, 0, 0]
        Pole = [192, 192, 128]
        Road = [128, 64, 128]
        Pavement = [60, 40, 222]
        Tree = [128, 128, 0]
        SignSymbol = [192, 128, 128]
        Fence = [64, 64, 128]
        Car = [64, 0, 128]
        Pedestrian = [64, 64, 0]
        Bicyclist = [0, 128, 192]
        Unlabelled = [0, 0, 0]

        # Row index == class id; order must match the training labels.
        label_colours = np.array(
            [
                Sky,
                Building,
                Pole,
                Road,
                Pavement,
                Tree,
                SignSymbol,
                Fence,
                Car,
                Pedestrian,
                Bicyclist,
                Unlabelled,
            ]
        )
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = label_colours[l, 0]
            g[temp == l] = label_colours[l, 1]
            b[temp == l] = label_colours[l, 2]

        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        return rgb
if __name__ == "__main__":
    # Visual smoke test: iterate the training split and show each batch of
    # images next to their colorized ground-truth masks.
    local_path = "/home/meetshah1995/datasets/segnet/CamVid"
    augmentations = Compose([RandomRotate(10), RandomHorizontallyFlip()])
    dst = camvidLoader(local_path, is_transform=True, augmentations=augmentations)
    bs = 4
    trainloader = data.DataLoader(dst, batch_size=bs)
    for i, data_samples in enumerate(trainloader):
        imgs, labels = data_samples
        imgs = imgs.numpy()[:, ::-1, :, :]  # BGR tensors back to RGB for display
        imgs = np.transpose(imgs, [0, 2, 3, 1])  # NCHW -> NHWC for imshow
        f, axarr = plt.subplots(bs, 2)
        for j in range(bs):
            axarr[j][0].imshow(imgs[j])
            axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))
        plt.show()
        # Type "ex" (then Enter) at the prompt to stop; anything else continues.
        a = input()
        if a == "ex":
            break
        else:
            plt.close()
| mit |
uber-common/deck.gl | bindings/pydeck/examples/icon_layer.py | 1 | 1113 | """
IconLayer
=========
Location of biergartens in Germany listed on OpenStreetMap as of early 2020.
"""
import pydeck as pdk
import pandas as pd
# Data from OpenStreetMap, accessed via osmpy
DATA_URL = "https://raw.githubusercontent.com/ajduberstein/geo_datasets/master/biergartens.json"
ICON_URL = "https://upload.wikimedia.org/wikipedia/commons/c/c4/Projet_bi%C3%A8re_logo_v2.png"

icon_data = {
    # Icon from Wikimedia, used the Creative Commons Attribution-Share Alike 3.0
    # Unported, 2.5 Generic, 2.0 Generic and 1.0 Generic licenses
    "url": ICON_URL,
    "width": 242,
    "height": 242,
    "anchorY": 242,
}

data = pd.read_json(DATA_URL)
# Every marker shares the same icon spec.  Assigning the whole column at once
# replaces the old per-row loop (data["icon_data"][i] = icon_data), which used
# chained indexing and triggered pandas' SettingWithCopyWarning.
data["icon_data"] = [icon_data] * len(data)

# Fit the initial viewport to the data with 10% padding.
view_state = pdk.data_utils.compute_view(data[["lon", "lat"]], 0.1)

icon_layer = pdk.Layer(
    type="IconLayer",
    data=data,
    get_icon="icon_data",
    get_size=4,
    size_scale=15,
    get_position=["lon", "lat"],
    pickable=True,
)

r = pdk.Deck(layers=[icon_layer], initial_view_state=view_state, tooltip={"text": "{tags}"})
r.to_html("icon_layer.html")
| mit |
DR08/mxnet | example/speech_recognition/stt_utils.py | 44 | 5892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
    """Feature dimension (number of frequency bins) for an FFT window of
    ``window`` milliseconds at a maximum frequency of ``max_freq`` Hz."""
    fft_length = int(0.001 * window * max_freq)
    return fft_length + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
                       dilation=1):
    """ Compute the length of the output sequence after 1D convolution along
    time. Note that this function is in line with the function used in
    Convolution1D class from Keras.
    Params:
        input_length (int): Length of the input sequence.
        filter_size (int): Width of the convolution kernel.
        border_mode (str): Only support `same` or `valid`.
        stride (int): Stride size used in 1D convolution.
        dilation (int)
    """
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid'}
    # Effective kernel width once dilation gaps are accounted for.
    effective_filter = (filter_size - 1) * dilation + 1
    if border_mode == 'valid':
        length = input_length - effective_filter + 1
    else:  # 'same'
        length = input_length
    # Ceiling division by the stride.
    return (length + stride - 1) // stride
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
    """
    Compute the spectrogram for a real signal.
    The parameters follow the naming convention of
    matplotlib.mlab.specgram
    Args:
        samples (1D array): input audio signal
        fft_length (int): number of elements in fft window
        sample_rate (scalar): sample rate
        hop_length (int): hop length (relative offset between neighboring
            fft windows).
    Returns:
        x (2D array): spectrogram [frequency x time]
        freq (1D array): frequency of each row in x
    Note:
        This is a truncating computation e.g. if fft_length=10,
        hop_length=5 and the signal has 23 elements, then the
        last 3 elements will be truncated.
    """
    assert not np.iscomplexobj(samples), "Must not pass in complex numbers"

    # Hann window as a column vector so it broadcasts over all frames at once.
    window = np.hanning(fft_length)[:, None]
    window_norm = np.sum(window ** 2)

    # The scaling below follows the convention of
    # matplotlib.mlab.specgram which is the same as
    # matlabs specgram.
    scale = window_norm * sample_rate

    # Drop trailing samples that do not fill a complete hop.
    trunc = (len(samples) - fft_length) % hop_length
    x = samples[:len(samples) - trunc]

    # "stride trick" reshape to include overlap: each column is one frame of
    # fft_length samples, consecutive frames start hop_length samples apart.
    # No data is copied -- x becomes a strided *view* into the signal.
    nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
    nstrides = (x.strides[0], x.strides[0] * hop_length)
    x = as_strided(x, shape=nshape, strides=nstrides)

    # window stride sanity check: the second frame must equal the slice that
    # starts hop_length samples into the signal.
    assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])

    # broadcast window, compute fft over columns and square mod
    # np.fft.rfft computes the DFT of real-valued input, returning only the
    # non-negative frequency bins (fft_length // 2 + 1 of them).
    x = np.fft.rfft(x * window, axis=0)
    x = np.absolute(x) ** 2

    # scale, 2.0 for everything except dc and fft_length/2; those two bins
    # have no mirrored counterpart in the one-sided spectrum.
    x[1:-1, :] *= (2.0 / scale)
    x[(0, -1), :] /= scale

    freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])

    return x, freqs
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
        overwrite (bool): Recompute the features even if a cached .csv exists
        save_feature_as_csvfile (bool): Cache the computed features next to
            the audio file as a .csv for faster subsequent loads
    Returns:
        2D array: log spectrogram with shape [time x frequency]
    """
    # Features are cached alongside the audio as "<name>.csv".
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            # Downmix multi-channel audio to mono.
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            if max_freq is None:
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            # Convert the millisecond parameters to sample counts.
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)
            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)
            # Keep only the bins at or below max_freq, then log-compress.
            # Transpose so that rows are time steps (the model's convention).
            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
| apache-2.0 |
evelynmitchell/smc | src/smc_sagews/smc_sagews/graphics.py | 4 | 25186 | ###############################################################################
#
# SageMathCloud: A collaborative web-based interface to Sage, IPython, LaTeX and the Terminal.
#
# Copyright (C) 2014, William Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import json, math
import sage_salvus
from uuid import uuid4
def uuid():
    """Return a fresh random (version 4) UUID as a string."""
    value = uuid4()
    return "{0}".format(value)
def json_float(t):
    """Coerce ``t`` to a float that JSON can represent.

    None passes through unchanged; NaN and +/-infinity become None, since
    neither survives JSON serialization in a way the frontend can consume.
    """
    if t is None:
        return None
    t = float(t)
    return None if math.isnan(t) or math.isinf(t) else t
#######################################################
# Three.js based plotting
#######################################################
# Conversion helper: pass None through, otherwise coerce to int.
# (def instead of an assigned lambda, per PEP 8 E731.)
def noneint(n):
    """Return ``int(n)``, or None when ``n`` is None."""
    return None if n is None else int(n)
class ThreeJS(object):
    # Python-side handle to a three.js widget rendered in the current cell.
    # All drawing happens client-side; this class just serializes graphics
    # objects to JSON and pushes javascript calls through salvus.
    def __init__(self, renderer=None, width=None, height=None,
                 frame=True, background=None, foreground=None,
                 spin=False, viewer=None, aspect_ratio=None,
                 frame_aspect_ratio = None,
                 **ignored):
        """
        INPUT:
        - renderer -- None (automatic), 'canvas2d', or 'webgl'
        - width -- None (automatic) or an integer
        - height -- None (automatic) or an integer
        - frame -- bool (default: True); draw a frame that includes every object.
        - background -- None (transparent); otherwise a color such as 'black' or 'white'
        - foreground -- None (automatic = black if transparent; otherwise opposite of background);
          or a color; this is used for drawing the frame and axes labels.
        - spin -- False; if True, spins 3d plot, with number determining speed (requires webgl and mouse over plot)
        - aspect_ratio -- None (square) or a triple [x,y,z] so that everything is scaled by x,y,z.
        - frame_aspect_ratio -- synonym for aspect_ratio
        - viewer -- synonym for renderer
        """
        if viewer is not None and renderer is None:
            renderer = viewer
        if renderer not in [None, 'webgl', 'canvas', 'canvas2d']:
            raise ValueError("unknown renderer='%s'; it must be None, webgl, or canvas2d"%renderer)
        self._frame = frame
        self._salvus = sage_salvus.salvus # object for this cell
        # Unique DOM id ties this Python object to its client-side widget.
        self._id = uuid()
        self._selector = "#%s"%self._id
        self._obj = "$('%s').data('salvus-threejs')"%self._selector
        self._salvus.html("<span id=%s class='salvus-3d-container'></span>"%self._id)
        if not isinstance(spin, bool):
            # Non-bool spin values are speeds; sanitize for JSON transport.
            spin = json_float(spin)
        if frame_aspect_ratio is not None:
            aspect_ratio = frame_aspect_ratio
        if aspect_ratio is not None:
            if aspect_ratio == 1 or aspect_ratio=='automatic':
                aspect_ratio = None
            elif not (isinstance(aspect_ratio, (list, tuple)) and len(aspect_ratio) == 3):
                raise TypeError("aspect_ratio must be None, 1 or a 3-tuple ")
            else:
                aspect_ratio = [json_float(x) for x in aspect_ratio]
        self._salvus.javascript("$('%s').salvus_threejs(obj)"%self._selector,
                                once = False,
                                obj = {
                                    'renderer' : renderer,
                                    'width' : noneint(width),
                                    'height' : noneint(height),
                                    'background' : background,
                                    'foreground' : foreground,
                                    'spin' : spin,
                                    'aspect_ratio' : aspect_ratio
                                })
        self._graphics = []
        self._call('init()')

    def _call(self, s, obj=None):
        # Invoke method `s` on the client-side widget once it is attached to
        # the DOM (eval_until_defined polls until the jQuery data is set).
        cmd = 'misc.eval_until_defined({code:"%s", cb:(function(err, __t__) { __t__ != null ? __t__.%s:void 0 })})'%(
            self._obj, s)
        self._salvus.execute_javascript(cmd, obj=obj)

    def bounding_box(self):
        """Return (xmin, xmax, ymin, ymax, zmin, zmax) enclosing every object
        added so far; defaults to the unit cube when nothing has been added."""
        if not self._graphics:
            return -1,1,-1,1,-1,1
        b = self._graphics[0].bounding_box()
        xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][1], b[0][2], b[1][2]
        # Expand the box to cover every subsequently added object.
        for g in self._graphics[1:]:
            b = g.bounding_box()
            xmin, xmax, ymin, ymax, zmin, zmax = (
                min(xmin,b[0][0]), max(b[1][0],xmax),
                min(b[0][1],ymin), max(b[1][1],ymax),
                min(b[0][2],zmin), max(b[1][2],zmax))
        v = xmin, xmax, ymin, ymax, zmin, zmax
        return [json_float(x) for x in v]

    def frame_options(self):
        """Frame-drawing options (bounds + whether to draw) for the client."""
        xmin, xmax, ymin, ymax, zmin, zmax = self.bounding_box()
        return {'xmin':xmin, 'xmax':xmax, 'ymin':ymin, 'ymax':ymax, 'zmin':zmin, 'zmax':zmax,
                'draw' : self._frame}

    def add(self, graphics3d, **kwds):
        """Serialize a Sage Graphics3d object and send it to the widget."""
        kwds = graphics3d._process_viewing_options(kwds)
        self._graphics.append(graphics3d)
        obj = {'obj' : graphics3d_to_jsonable(graphics3d),
               'wireframe' : jsonable(kwds.get('wireframe')),
               'set_frame' : self.frame_options()}
        self._call('add_3dgraphics_obj(obj)', obj=obj)

    def render_scene(self, force=True):
        """Ask the client to (re)render the scene."""
        self._call('render_scene(obj)', obj={'force':force})

    def add_text(self, pos, text, fontsize=18, fontface='Arial', sprite_alignment='topLeft'):
        """Place a text sprite at position ``pos`` = (x, y, z)."""
        self._call('add_text(obj)',
                   obj={'pos':[json_float(pos[0]), json_float(pos[1]), json_float(pos[2])],'text':str(text),
                        'fontsize':int(fontsize),'fontface':str(fontface), 'sprite_alignment':str(sprite_alignment)})

    def animate(self, fps=None, stop=None, mouseover=True):
        """Start the client-side animation loop (e.g. for spinning plots)."""
        self._call('animate(obj)', obj={'fps':noneint(fps), 'stop':stop, 'mouseover':mouseover})

    def init_done(self):
        """Signal the client that initial scene construction is finished."""
        self._call('init_done()')
def show_3d_plot_using_threejs(g, **kwds):
    """Render the Sage 3d graphics object ``g`` in the cell using three.js.

    Display options attached to the graphics object itself (``g._extra_kwds``)
    are honored unless explicitly overridden by ``kwds``.  The deprecated
    ``camera_distance`` option is dropped silently.
    """
    # _extra_kwds may be None; normalize once instead of once per option
    # (the old code recomputed this inside the loop and then inconsistently
    # read g._extra_kwds directly).
    extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
    for k in ['spin', 'renderer', 'viewer', 'frame', 'height', 'width',
              'background', 'foreground', 'aspect_ratio']:
        if k in extra_kwds and k not in kwds:
            kwds[k] = extra_kwds[k]
    if 'camera_distance' in kwds:
        del kwds['camera_distance']  # deprecated
    t = ThreeJS(**kwds)
    t.add(g, **kwds)
    if kwds.get('spin', False):
        t.animate(mouseover=False)
    t.init_done()
import sage.plot.plot3d.index_face_set
import sage.plot.plot3d.shapes
import sage.plot.plot3d.base
import sage.plot.plot3d.shapes2
from sage.structure.element import Element
def jsonable(x):
    """Coerce Sage ring elements to JSON-safe floats; pass anything else
    through unchanged."""
    return json_float(x) if isinstance(x, Element) else x
def graphics3d_to_jsonable(p):
    """Convert a Sage Graphics3d object ``p`` into a list of JSON-able dicts
    understood by the client-side three.js renderer.

    Works by dispatching on the concrete graphics type (see ``handler``) and
    appending one dict per primitive to ``obj_list``.  Mesh-like objects are
    round-tripped through their Wavefront OBJ/MTL text representations.
    """
    obj_list = []

    def parse_obj(obj):
        # Extract material name and face index lists from OBJ text.
        # Vertices ("v" lines) are parsed separately in convert_index_face_set.
        material_name = ''
        faces = []
        for item in obj.split("\n"):
            tmp = str(item.strip())
            if not tmp:
                continue
            k = tmp.split()
            if k[0] == "usemtl":  # material name
                material_name = k[1]
            elif k[0] == 'f':  # face
                v = [int(a) for a in k[1:]]
                faces.append(v)
            # other types are parse elsewhere in a different pass.
        return [{"material_name":material_name, "faces":faces}]

    def parse_texture(p):
        # Collect (name, color) pairs from the object's texture set.
        texture_dict = []
        textures = p.texture_set()
        for item in range(0,len(textures)):
            texture_pop = textures.pop()
            string = str(texture_pop)
            item = string.split("(")[1]
            name = item.split(",")[0]
            color = texture_pop.color
            tmp_dict = {"name":name,"color":color}
            texture_dict.append(tmp_dict)
        return texture_dict

    def get_color(name,texture_set):
        # Look up the RGB color for texture `name`; empty list when missing.
        for item in range(0,len(texture_set)):
            if(texture_set[item]["name"] == name):
                color = texture_set[item]["color"]
                color_list = [color[0],color[1],color[2]]
                break
            else:
                color_list = []
        return color_list

    def parse_mtl(p):
        # Parse the MTL (material) text into a list of material dicts.
        # Each "newmtl" line flushes the previously accumulated material
        # (guarded by try/except since nothing is accumulated before the
        # first one -- hence the UnboundLocalError handling).
        mtl = p.mtl_str()
        all_material = []
        for item in mtl.split("\n"):
            if "newmtl" in item:
                tmp = str(item.strip())
                tmp_list = []
                try:
                    texture_set = parse_texture(p)
                    color = get_color(name,texture_set)
                except (ValueError,UnboundLocalError):
                    pass
                try:
                    tmp_list = {"name":name,"ambient":ambient, "specular":specular, "diffuse":diffuse, "illum":illum_list[0],
                                "shininess":shininess_list[0],"opacity":opacity_diffuse[3],"color":color}
                    all_material.append(tmp_list)
                except (ValueError,UnboundLocalError):
                    pass
                ambient = []
                specular = []
                diffuse = []
                illum_list = []
                shininess_list = []
                opacity_list = []
                opacity_diffuse = []
                tmp_list = []
                name = tmp.split()[1]
            if "Ka" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        ambient.append(json_float(t))
                    except ValueError:
                        pass
            if "Ks" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        specular.append(json_float(t))
                    except ValueError:
                        pass
            if "Kd" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        diffuse.append(json_float(t))
                    except ValueError:
                        pass
            if "illum" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        illum_list.append(json_float(t))
                    except ValueError:
                        pass
            if "Ns" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        shininess_list.append(json_float(t))
                    except ValueError:
                        pass
            # NOTE(review): the substring test below also matches "Kd" lines,
            # so the three diffuse values land in opacity_diffuse before the
            # actual "d <value>" line appends a fourth element -- which is why
            # opacity_diffuse[3] above/below is the opacity.  Fragile but
            # relied upon; do not "fix" without reworking the indexing.
            if "d" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        opacity_diffuse.append(json_float(t))
                    except ValueError:
                        pass
        # Flush the last accumulated material after the loop ends.
        try:
            color = list(p.all[0].texture.color.rgb())
        except (ValueError, AttributeError):
            pass
        try:
            texture_set = parse_texture(p)
            color = get_color(name,texture_set)
        except (ValueError, AttributeError):
            color = []
            #pass
        tmp_list = {"name":name,"ambient":ambient, "specular":specular, "diffuse":diffuse, "illum":illum_list[0],
                    "shininess":shininess_list[0],"opacity":opacity_diffuse[3],"color":color}
        all_material.append(tmp_list)
        return all_material

    #####################################
    # Conversion functions
    #####################################

    def convert_index_face_set(p, T, extra_kwds):
        # Meshes: apply the pending transform, then read geometry + material
        # out of the OBJ/MTL text.
        if T is not None:
            p = p.transform(T=T)
        face_geometry = parse_obj(p.obj())
        material = parse_mtl(p)
        vertex_geometry = []
        obj = p.obj()
        for item in obj.split("\n"):
            if "v" in item:
                tmp = str(item.strip())
                for t in tmp.split():
                    try:
                        vertex_geometry.append(json_float(t))
                    except ValueError:
                        pass
        myobj = {"face_geometry" : face_geometry,
                 "type" : 'index_face_set',
                 "vertex_geometry" : vertex_geometry,
                 "material" : material}
        for e in ['wireframe', 'mesh']:
            if p._extra_kwds is not None:
                v = p._extra_kwds.get(e, None)
                if v is not None:
                    myobj[e] = jsonable(v)
        obj_list.append(myobj)

    def convert_text3d(p, T, extra_kwds):
        obj_list.append(
            {"type" : "text",
             "text" : p.string,
             "pos" : [0,0,0] if T is None else T([0,0,0]),
             "color" : "#" + p.get_texture().hex_rgb(),
             'fontface' : str(extra_kwds.get('fontface', 'Arial')),
             'constant_size' : bool(extra_kwds.get('constant_size', True)),
             'fontsize' : int(extra_kwds.get('fontsize', 12))})

    def convert_line(p, T, extra_kwds):
        obj_list.append({"type" : "line",
                         "points" : p.points if T is None else [T.transform_point(point) for point in p.points],
                         "thickness" : jsonable(p.thickness),
                         "color" : "#" + p.get_texture().hex_rgb(),
                         "arrow_head" : bool(p.arrow_head)})

    def convert_point(p, T, extra_kwds):
        obj_list.append({"type" : "point",
                         "loc" : p.loc if T is None else T(p.loc),
                         "size" : json_float(p.size),
                         "color" : "#" + p.get_texture().hex_rgb()})

    def convert_combination(p, T, extra_kwds):
        # Recurse into grouped objects, keeping the current transform.
        for x in p.all:
            handler(x)(x, T, p._extra_kwds)

    def convert_transform_group(p, T, extra_kwds):
        # Compose the group's transform with any transform inherited so far.
        if T is not None:
            T = T * p.get_transformation()
        else:
            T = p.get_transformation()
        for x in p.all:
            handler(x)(x, T, p._extra_kwds)

    def nothing(p, T, extra_kwds):
        pass

    def handler(p):
        # Dispatch on concrete graphics type; order matters since several of
        # these classes share base classes.
        if isinstance(p, sage.plot.plot3d.index_face_set.IndexFaceSet):
            return convert_index_face_set
        elif isinstance(p, sage.plot.plot3d.shapes.Text):
            return convert_text3d
        elif isinstance(p, sage.plot.plot3d.base.TransformGroup):
            return convert_transform_group
        elif isinstance(p, sage.plot.plot3d.base.Graphics3dGroup):
            return convert_combination
        elif isinstance(p, sage.plot.plot3d.shapes2.Line):
            return convert_line
        elif isinstance(p, sage.plot.plot3d.shapes2.Point):
            return convert_point
        elif isinstance(p, sage.plot.plot3d.base.PrimitiveObject):
            return convert_index_face_set
        elif isinstance(p, sage.plot.plot3d.base.Graphics3d):
            # this is an empty scene
            return nothing
        else:
            raise NotImplementedError("unhandled type ", type(p))

    # start it going -- this modifies obj_list
    handler(p)(p, None, None)
    # now obj_list is full of the objects
    return obj_list
###
# Interactive 2d Graphics
###
import os, matplotlib.figure
class InteractiveGraphics(object):
    """Wrap a Sage or matplotlib graphics object and make the rendered image
    respond to mouse events in the SageMathCloud frontend.

    ``events`` maps event names (e.g. ``'click'``, ``'mousemove'``) to
    callbacks; each callback receives the event position translated into the
    axes' data coordinates.
    """

    def __init__(self, g, **events):
        self._g = g
        self._events = events
        # Names of the callbacks registered in the salvus namespace by show();
        # remembered so __del__ can remove them again.
        self._ids = []

    def figure(self, **kwds):
        """Return a matplotlib Figure for the wrapped graphics object."""
        if isinstance(self._g, matplotlib.figure.Figure):
            return self._g

        options = dict()
        options.update(self._g.SHOW_OPTIONS)
        options.update(self._g._extra_kwds)
        options.update(kwds)
        # These options are meaningful to Sage's save(), not to matplotlib().
        options.pop('dpi'); options.pop('transparent'); options.pop('fig_tight')
        fig = self._g.matplotlib(**options)

        from matplotlib.backends.backend_agg import FigureCanvasAgg
        canvas = FigureCanvasAgg(fig)
        fig.set_canvas(canvas)
        fig.tight_layout()  # critical, since sage does this -- if not, coords all wrong
        return fig

    def save(self, filename, **kwds):
        """Save the wrapped graphics object to ``filename``."""
        if isinstance(self._g, matplotlib.figure.Figure):
            self._g.savefig(filename)
        else:
            # When fig_tight=True (the default), the margins are very slightly different.
            # I don't know how to properly account for this yet (or even if it is possible),
            # since it only happens at figsize time -- do "a=plot(sin); a.save??".
            # So for interactive graphics, we just set this to false no matter what.
            kwds['fig_tight'] = False
            self._g.save(filename, **kwds)

    def show(self, **kwds):
        """Render the figure in the output cell and register event callbacks."""
        fig = self.figure(**kwds)
        ax = fig.axes[0]
        # upper left data coordinates
        xmin, ymax = ax.transData.inverted().transform( fig.transFigure.transform((0,1)) )
        # lower right data coordinates
        xmax, ymin = ax.transData.inverted().transform( fig.transFigure.transform((1,0)) )

        id = '_a' + uuid().replace('-','')

        def to_data_coords(p):
            # p is a figure-relative position with 0<=x,y<=1.
            return ((xmax-xmin)*p[0] + xmin, (ymax-ymin)*(1-p[1]) + ymin)

        if kwds.get('svg',False):
            filename = '%s.svg'%id
            del kwds['svg']
        else:
            filename = '%s.png'%id
        fig.savefig(filename)

        def f(event, p):
            self._events[event](to_data_coords(p))
        sage_salvus.salvus.namespace[id] = f
        # BUGFIX: remember the registered name.  __del__ previously looked up
        # the nonexistent attribute self._id, so it always raised and the
        # namespace entries registered here were never cleaned up.
        self._ids.append(id)

        x = {}
        for ev in self._events.keys():
            x[ev] = id
        sage_salvus.salvus.file(filename, show=True, events=x)
        os.unlink(filename)

    def __del__(self):
        # Remove every callback registered by show(); getattr guards against
        # a partially constructed instance.
        for u in getattr(self, '_ids', ()):
            if u in sage_salvus.salvus.namespace:
                del sage_salvus.salvus.namespace[u]
###
# D3-based interactive 2d Graphics
###
###
# The following is a modified version of graph_plot_js.py from the Sage library, which was
# written by Nathann Cohen in 2013.
###
def graph_to_d3_jsonable(G,
                         vertex_labels = True,
                         edge_labels = False,
                         vertex_partition = [],
                         edge_partition = [],
                         force_spring_layout = False,
                         charge = -120,
                         link_distance = 50,
                         link_strength = 1,
                         gravity = .04,
                         vertex_size = 7,
                         edge_thickness = 2,
                         width = None,
                         height = None,
                         **ignored):
    r"""
    Display a graph in SageMathCloud using the D3 visualization library.
    INPUT:
    - ``G`` -- the graph
    - ``vertex_labels`` (boolean) -- Whether to display vertex labels (set to
      ``True`` by default).
    - ``edge_labels`` (boolean) -- Whether to display edge labels (set to
      ``False`` by default).
    - ``vertex_partition`` -- a list of lists representing a partition of the
      vertex set. Vertices are then colored in the graph according to the
      partition. Set to ``[]`` by default.
    - ``edge_partition`` -- same as ``vertex_partition``, with edges
      instead. Set to ``[]`` by default.
    - ``force_spring_layout`` -- whether to take sage's position into account if
      there is one (see :meth:`~sage.graphs.generic_graph.GenericGraph.` and
      :meth:`~sage.graphs.generic_graph.GenericGraph.`), or to compute a spring
      layout. Set to ``False`` by default.
    - ``vertex_size`` -- The size of a vertex' circle. Set to `7` by default.
    - ``edge_thickness`` -- Thickness of an edge. Set to ``2`` by default.
    - ``charge`` -- the vertices' charge. Defines how they repulse each
      other. See `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``-120`` by default.
    - ``link_distance`` -- See
      `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``50`` by default.
    - ``link_strength`` -- See
      `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``1`` by default.
    - ``gravity`` -- See
      `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``0.04`` by default.
    EXAMPLES::
        show(graphs.RandomTree(50), d3=True)
        show(graphs.PetersenGraph(), d3=True, vertex_partition=g.coloring())
        show(graphs.DodecahedralGraph(), d3=True, force_spring_layout=True)
        show(graphs.DodecahedralGraph(), d3=True)
        g = digraphs.DeBruijn(2,2)
        g.allow_multiple_edges(True)
        g.add_edge("10","10","a")
        g.add_edge("10","10","b")
        g.add_edge("10","10","c")
        g.add_edge("10","10","d")
        g.add_edge("01","11","1")
        show(g, d3=True, vertex_labels=True,edge_labels=True,
             link_distance=200,gravity=.05,charge=-500,
             edge_partition=[[("11","12","2"),("21","21","a")]],
             edge_thickness=4)
    """
    directed = G.is_directed()
    multiple_edges = G.has_multiple_edges()

    # Associated an integer to each vertex
    v_to_id = {v: i for i, v in enumerate(G.vertices())}

    # Vertex colors: vertices not covered by the partition fall in the extra
    # group len(vertex_partition).
    color = {i: len(vertex_partition) for i in range(G.order())}
    for i, l in enumerate(vertex_partition):
        for v in l:
            color[v_to_id[v]] = i

    # Vertex list
    nodes = []
    for v in G.vertices():
        nodes.append({"name": str(v), "group": str(color[v_to_id[v]])})

    # Edge colors.
    edge_color_default = "#aaa"
    from sage.plot.colors import rainbow
    color_list = rainbow(len(edge_partition))
    edge_color = {}
    for i, l in enumerate(edge_partition):
        for e in l:
            u, v, label = e if len(e) == 3 else e+(None,)
            edge_color[u, v, label] = color_list[i]
            if not directed:
                # Undirected edges can be queried in either orientation.
                edge_color[v, u, label] = color_list[i]

    # Edge list
    edges = []
    seen = {}  # How many times has this edge been seen ?

    for u, v, l in G.edges():
        # Edge color
        color = edge_color.get((u, v, l), edge_color_default)

        # Computes the curve of the edge
        curve = 0

        # Loop ?
        if u == v:
            seen[u, v] = seen.get((u, v), 0)+1
            curve = seen[u, v]*10+10

        # For directed graphs, one also has to take into accounts
        # edges in the opposite direction
        elif directed:
            if G.has_edge(v, u):
                seen[u, v] = seen.get((u, v), 0)+1
                curve = seen[u, v]*15
            else:
                if multiple_edges and len(G.edge_label(u, v)) != 1:
                    # Multiple edges. The first one has curve 15, then
                    # -15, then 30, then -30, ...
                    seen[u, v] = seen.get((u, v), 0) + 1
                    curve = (1 if seen[u, v] % 2 else -1)*(seen[u, v]//2)*15

        elif not directed and multiple_edges:
            # Same formula as above for multiple edges
            if len(G.edge_label(u, v)) != 1:
                seen[u, v] = seen.get((u, v), 0) + 1
                curve = (1 if seen[u, v] % 2 else -1)*(seen[u, v]//2)*15

        # Adding the edge to the list
        edges.append({"source": v_to_id[u],
                      "target": v_to_id[v],
                      "strength": 0,
                      "color": color,
                      "curve": curve,
                      "name": str(l) if edge_labels else ""})

    # Self-loops are rendered separately by the client.
    loops = [e for e in edges if e["source"] == e["target"]]
    edges = [e for e in edges if e["source"] != e["target"]]

    # Defines the vertices' layout if possible: when fixed positions exist
    # (and a spring layout was not forced), disable all forces.
    Gpos = G.get_pos()
    pos = []
    if Gpos is not None and force_spring_layout is False:
        charge = 0
        link_strength = 0
        gravity = 0
        for v in G.vertices():
            x, y = Gpos[v]
            # d3's y axis grows downward, hence the sign flip.
            pos.append([json_float(x), json_float(-y)])

    # NOTE(review): int(link_strength) truncates non-integer strengths
    # (e.g. 1.5 -> 1); confirm whether that is intended before relying on
    # fractional link_strength values.
    return {"nodes" : nodes,
            "links" : edges, "loops": loops, "pos": pos,
            "directed" : G.is_directed(),
            "charge" : int(charge),
            "link_distance" : int(link_distance),
            "link_strength" : int(link_strength),
            "gravity" : float(gravity),
            "vertex_labels" : bool(vertex_labels),
            "edge_labels" : bool(edge_labels),
            "vertex_size" : int(vertex_size),
            "edge_thickness" : int(edge_thickness),
            "width" : json_float(width),
            "height" : json_float(height) }
| gpl-3.0 |
gofortargets/CNN_brandsafety | knx/text/kte/extract.py | 1 | 98594 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import matplotlib
matplotlib.use('Agg')
import logging
import argparse
import cPickle as pickle
import gc
from itertools import combinations
import json
import math
# import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import os
import re
import scipy.optimize as optimize
from scipy.sparse import coo_matrix, csr_matrix
import scipy
import sys
import threading
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, StaticFileHandler, asynchronous
import time
from pulp import *
from pymongo import MongoClient
from BS.knx.text.chunker import MaxentNPChunker
from BS.knx.text import KnorexNERTagger
from BS.knx.text import DocToFeature, Lemmatizer, tf_to_tfidf, tf_to_okapi, tf_to_midf
from BS.knx.text.feature_to_arff import FeatureToArff
from knx.util.logging import Timing, Unbuffered
from knx.version import VERSION
logging.basicConfig(level=logging.INFO)
DEBUG = False
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
###################
# Helper function #
###################
def loadarff(filename):
    """Load ARFF file which contains feature vectors with the document name as the last index
    **Parameters**
        filename : string
            The ARFF file to be loaded
    **Returns**
        tf_score : coo_matrix
            The feature vectors
        concepts : list
            The list of concept names (as document names)
    **Notes**
        This method assumes that the ARFF file is structured such that the last column will be a string attribute
    """
    with open(filename, 'r') as arff:
        lines = arff.readlines()
    # Count the attributes and find where the @data section starts.
    num_attr = 0
    for idx, line in enumerate(lines):
        if line.startswith('@attribute'):
            num_attr += 1
        if line.startswith('@data'):
            dataIdx = idx + 1
            break
    # The last @attribute is the concept (document) name, not a feature.
    num_attr -= 1
    lines = lines[dataIdx:]
    # Sparse ARFF rows are brace-delimited: "{idx val, idx val, ... 'name'}".
    is_sparse = (lines[0][0] == '{')
    if is_sparse:
        data = []
        indices = []
        indptr = [0]
        concepts = []
        for row, line in enumerate(lines):
            # Read sparse
            is_sparse = True
            line = line.strip('{} \r\n\t')
            # The trailing token is the quoted concept name; everything before
            # it alternates column index / value.
            tmp = line.rsplit(' ', 1)
            items = map(int, re.split('[, ]', tmp[0]))
            # Unescape the concept name: unescaped '_' were spaces, and the
            # \' and \_ sequences are literal quote and underscore.
            concepts.append(re.sub(r'(?<!\\)_', ' ', tmp[1][1:-1]).replace('\\\'', '\'').replace(r'\_', '_'))
            data.extend(items[1::2])
            indices.extend(items[0:-1:2])
            # NOTE: Python 2 semantics here (map -> list, "/" -> int division).
            indptr.append(indptr[-1] + ((len(items) - 1) / 2))
        data = np.array(data, dtype=float)
        indices = np.array(indices, dtype=np.intc)
        indptr = np.array(indptr, dtype=np.intc)
        return (csr_matrix((data, indices, indptr), shape=(len(concepts), num_attr)), concepts)
    else:
        data = []
        concepts = []
        for row, line in enumerate(lines):
            # Read dense
            line = line.strip(' \r\n\t')
            values = line.split(',')
            # All columns but the last are features; the last is the concept.
            data.append(values[:len(values) - 1])
            concepts.append(values[-1])
        return (coo_matrix(data, dtype=float), concepts)
def update_seid(t_phrase, reduce_id, sid, eid):
    """Adjust the start/end character offsets (sid, eid) of a phrase after the
    tokens at positions ``reduce_id`` have been dropped from ``t_phrase``.

    Only a contiguous run of removals at the very front shifts ``sid``, and a
    contiguous run at the very back shrinks ``eid``; removals in the middle
    leave the offsets alone.  A sentinel of -1 for either offset returns both
    unchanged.
    """
    if sid == -1 or eid == -1:
        return sid, eid
    # Leading removals push the start offset right by token length + 1 space.
    percent_removed = False
    expected = 0
    for idx in sorted(reduce_id):
        if idx != expected:
            break
        sid += len(t_phrase[idx][0]) + 1
        if t_phrase[idx][0] == "%":
            percent_removed = True
        expected += 1
    # A "%" near the boundary was not preceded by a space in the original
    # text, so one separator too many was counted above.
    if percent_removed or (expected + 1 < len(t_phrase) and t_phrase[expected + 1][0] == "%"):
        sid -= 1
    # Trailing removals pull the end offset left by the same amount.
    expected = len(t_phrase) - 1
    for idx in sorted(reduce_id, reverse=True):
        if idx != expected:
            break
        eid -= len(t_phrase[idx][0]) + 1
        expected -= 1
    return sid, eid
def filter_noun_phrases(phrases, lemmatizer=None):
    """Filter phrases and produce phrases suitable as key phrases

    **Parameters**
    phrases : list of phrases
        Each phrase is a tuple (t_phrase, sid, eid) where t_phrase is a list
        of (word, pos) pairs (pos is the POS tag for the word) and sid/eid
        are the phrase's start/end character offsets in the original text
        (or -1 when unknown).
    lemmatizer : the lemmatizer to be used (optional)
        The lemmatizer should implement the method "lemmatize" that accepts a word to be lemmatized and an optional POS

    **Returns**
    The filtered list of (t_phrase, sid, eid) tuples: disallowed POS tokens
    removed, leading cardinal-number patterns stripped, boundary tokens
    trimmed, and only phrases containing at least one noun kept.
    """
    if lemmatizer is None:
        lemmatizer = Lemmatizer()

    def lemmatize_nouns(word_pos):
        # Lemmatize lowercase nouns only; leave everything else untouched.
        word, pos = word_pos
        # Initially we want to remove if pos in {'NN', 'NNS'}, but that caused "Bras Basah" in start of sentence
        # to be lemmatized as "Bra Basah"
        # Need to couple this with NER, but until that happens, let's not make such silly lemmatization
        # by considering only lowercase words
        if word.islower() and pos.startswith('NN'):
            lemmatized = lemmatizer.lemmatize(word.lower(), 'n')
            word = lemmatized
        return (word, pos)

    def reduce_phrase(phrase):
        # Drop tokens whose POS can never be part of a key phrase, and shift
        # the phrase offsets via update_seid for the removed positions.
        # Remove all words with those tags
        DISALLOWED = {'DT', 'EX', 'LS', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'WDT',
                      'WP', 'WP$', 'WRB', "''", '``'} #, 'SYM', '$', 'CD'
        t_phrase, sid, eid = phrase
        res, reduce_id = list(), list()
        for i, word_pos in enumerate(t_phrase):
            if word_pos[1] in DISALLOWED:
                reduce_id.append(i)
                continue
            res.append(word_pos)
        sid, eid = update_seid(t_phrase, reduce_id, sid, eid)
        return (res, sid, eid)

    def lemmatize_phrase(phrase):
        # Lemmatize each word unless it directly follows a cardinal number
        # (currently unused -- see the commented-out call below).
        t_phrase, sid, eid = phrase
        res = []
        prev = tuple()
        for word_pos in t_phrase:
            if len(prev) == 0 or prev[1] != 'CD':
                res.append(lemmatize_nouns(word_pos))
                prev = word_pos
            else:
                res.append(word_pos)
        return (res, sid, eid)

    def contains_noun(phrase):
        # Keep only phrases with at least one noun-tagged token.
        return any(pos.startswith('NN') for word, pos in phrase[0])

    def remove_cd(phrase):
        # Strip leading cardinal numbers from patterns like "5 things" or
        # "20 best places", adjusting the start offset.
        t_phrase, sid, eid = phrase
        pos_str = ' '.join(zip(*t_phrase)[1])
        if pos_str in {'CD NNS', 'CD JJ NNS', 'CD NN NNS'}:
            sid += len(t_phrase[0][0]) + 1
            del t_phrase[0:1]
            # return False
        elif pos_str in {'CD CD NNS', 'CD CD JJ NNS', 'CD CD NN NNS'}:
            sid += len(t_phrase[0][0]) + 1 + len(t_phrase[1][0]) + 1
            del t_phrase[0:2]
            # return False
        return (t_phrase, sid, eid)
        # return True

    def remove_boundary(phrase):
        # Trim tokens that cannot start or end a key phrase (conjunctions,
        # prepositions, punctuation, ...) from both ends, updating offsets.
        def traverse(phrase, rev=False, pos=0):
            # Collect indices of the consecutive removable tokens at the
            # front (or back, when rev=True) of the phrase.
            res = []
            tmp = (list(reversed(phrase)) if rev else phrase[:])
            n = len(phrase)
            for i, word_pos in enumerate(tmp):
                if i == pos:
                    if word_pos[1] in NON_BOUNDARY_POS or (word_pos in NON_BEGIN_WORDS and not rev) or\
                            (word_pos in NON_END_WORDS and rev):
                        if rev:
                            res.append(n - i - 1)
                        else:
                            res.append(i)
                        pos += 1
                    else:
                        break
            return res
        NON_BOUNDARY_POS = {'CC', 'IN', ',', '.', '-LSB-', '-RSB-', '-LRB-', '-RRB-', 'CD', 'SYM', '$'} #,
        NON_BEGIN_WORDS = {('least','JJS')}
        NON_END_WORDS = {('one','NN'), ('ones','NNS'), ('thing', 'NN'), ('things', 'NNS')}
        t_phrase, sid, eid = phrase
        reduce_id = traverse(t_phrase)
        s = (max(reduce_id) + 1 if reduce_id else 0)
        r1 = traverse(t_phrase, rev=True)
        e = (min(r1) if r1 else len(t_phrase))
        reduce_id.extend(r1)
        sid, eid = update_seid(t_phrase, reduce_id, sid, eid)
        # res = [word_pos for i, word_pos in enumerate(t_phrase)
        #        if word_pos[1] not in NON_BOUNDARY or 0 < i < len(t_phrase) - 1]
        return (t_phrase[s:e], sid, eid)

    def is_not_null(phrase):
        # Discard phrases that became empty after token removal.
        t_phrase, sid, eid = phrase
        if len(t_phrase) == 0:
            return False
        return True

    phrases = [reduce_phrase(phrase) for phrase in phrases]
    phrases = filter(is_not_null, phrases)
    phrases = [remove_cd(phrase) for phrase in phrases]
    # phrases = [lemmatize_phrase(phrase) for phrase in phrases]
    phrases = [remove_boundary(phrase) for phrase in phrases]
    phrases = filter(contains_noun, phrases)
    return phrases
###########################
# Functions for debugging #
###########################
def arctanh_param(a, b, c, d, x):
b1 = np.tanh(b) / max(x)
c1 = np.tanh(c)
x1 = b1 * x ** 2 + c1
x1[x1 > 1] = 1
x1[x1 < -1] = -1
return a * np.arctanh(x1) + d
def residuals(p, y, x):
    """Residuals (y - model(x)) of the parameterized arctanh fit, for use
    with scipy.optimize.leastsq; p unpacks to (a, b, c, d)."""
    a, b, c, d = p
    predicted = arctanh_param(a, b, c, d, x)
    # Alternative that fits more closely to the higher weights:
    # return x * (y - arctanh_param(a, b, c, d, x))
    return y - predicted
def fit(x, y):
    """Fit the data (x, y) with the parameterized arctanh function: a * arctanh(bx + c) + d

    Returns the fitted curve evaluated at x.
    NOTE(review): arctanh_param actually evaluates b'*x**2 + c' (quadratic
    in x), not bx + c -- confirm whether the docstring or the model is the
    intended form.
    """
    # Initial parameter guess (a, b, c, d).
    p0 = np.array([1, 0, 0.1, 0], dtype=np.float64)
    plsq, cov_x, infodict, mesg, ier = optimize.leastsq(residuals, p0, args=(y, x), maxfev=5000, full_output=True)
    a, b, c, d = plsq
    # Report the effective (squashed) coefficients; len(x) - 1 equals
    # max(x) when x = arange(len(x)), as produced by draw_vector.
    b1 = np.tanh(b) / (len(x) - 1)
    c1 = (1 - np.abs(b1)) * np.tanh(c)
    print 'Fitted function:'
    print 'y = %.4E * arctanh(%.4Ex + %.4E) + %.4E' % (a, b1, c1, d)
    print ier, mesg
    print infodict['nfev']
    return arctanh_param(a, b, c, d, x)
def draw_vector(vect):
    """Plot the sorted nonzero tail of a sparse row vector together with its
    arctanh least-squares fit, saving the figure to 'vector.png'.

    Debugging aid: visualizes the term-weight distribution of one document.
    """
    # Sort values ascending, keep the 1000 largest, drop zeros.
    vect_arr = np.array(sorted(vect.toarray()[0]), dtype=np.float64)
    vect_arr = vect_arr[-1000:]
    vect_arr = vect_arr[vect_arr > 0]
    if len(vect_arr) == 0:
        return
    x = np.arange(len(vect_arr), dtype=np.float64)
    y = np.array(vect_arr)
    z = fit(x, y)
    plt.figure(figsize=(10, 5))
    plt.plot(x, y, linewidth="2", label='data', alpha=0.5)
    plt.plot(x, z, linewidth="2", linestyle='--', color='k', label='arctanh least-square fit')
    plt.legend(loc='upper center')
    plt.savefig('vector.png')
####################
# Arguments parser #
####################
class FileAction(argparse.Action):
    """Spread a multi-value file argument over several namespace attributes.

    The first value is stored under ``dest``; an optional second and third
    value are stored under ``dest + '_arff'`` and ``dest + '_vocab'``.
    Values beyond the third are ignored.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        for suffix, value in zip(('', '_arff', '_vocab'), values[:3]):
            setattr(namespace, self.dest + suffix, value)
class ArffAction(argparse.Action):
    """Store the first value under ``dest`` and, when present, the second
    value (the vocabulary path) under ``dest + '_vocab'``."""

    def __call__(self, parser, namespace, values, option_string=None):
        main_value, extras = values[0], values[1:]
        setattr(namespace, self.dest, main_value)
        if extras:
            setattr(namespace, self.dest + '_vocab', extras[0])
class StartAction(argparse.Action):
    """Translate the flag used on the command line into a start mode:
    2 for --start_stdin (read from standard input), 0 for --nostart
    (start nothing), and 1 (run as server) for anything else."""

    _MODE_BY_FLAG = {'--start_stdin': 2, '--nostart': 0}

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self._MODE_BY_FLAG.get(option_string, 1))
# Maps each command-line scorer name to the KeyTermsExtractor method name
# implementing that term-weighting scheme.
scorers = {
    'tf': 'scorer_tf',
    'tfidf': 'scorer_tfidf',
    'midf': 'scorer_midf',
    'okapi': 'scorer_okapi'
}
def parse_arguments():
    """Parse arguments from command line.

    Returns the argparse namespace.  The term-weighting scheme is a
    required positional argument and must come first.  The --train*/--test*
    options use the custom Actions above to spread their multiple values
    over separate namespace attributes (e.g. traindir_arff, trainarff_vocab).
    """
    # Fixed typo in the user-facing description ("specififed" -> "specified").
    parser = argparse.ArgumentParser(description=('Starts the concept extractor, or test the concept extractor if '
                                                  'either --testarff or --testdir is specified'),
                                     epilog=('Due to the structure of the optional arguments, please provide the term '
                                             'weighting scheme immediately after the program name'))
    parser.add_argument('scorer_name', choices=scorers.keys(),
                        help='The term weighting scheme used to process the raw TF counts')
    parser.add_argument('-p', '--port', dest='port', default=8205, type=int,
                        help='Specify the port in which the server should start (default to 8205)')
    parser.add_argument('--boost_method', dest='boost_method', metavar='[012]', default=0, type=int,
                        help=('Specify the boosting method that will be used. 0 means no boosting, 1 means concept '
                              'boosting, 2 means concept boosting with amplification (defaults to 0)'))
    parser.add_argument('--word_normalization', dest='word_normalization', metavar='{stem|lemmatize|none}',
                        default='stem',
                        help=('Specify how words should be normalized. Options: stem, lemmatize, none\n'
                              '(defaults to stem)'))
    parser.add_argument('--n_jobs', dest='n_jobs', action='store', default=1, type=int,
                        help='The number of processes to run')
    start_group = parser.add_mutually_exclusive_group()
    start_group.add_argument('--start_stdin', nargs=0, dest='start', default=1, action=StartAction,
                             help='Flag to start receiving input from standard input. Default is to run as server.')
    start_group.add_argument('--nostart', nargs=0, dest='start', default=1, action=StartAction,
                             help='Flag to not start anything. Default is to run as server')
    lowercase_group = parser.add_mutually_exclusive_group()
    lowercase_group.add_argument('--lowercase', dest='lowercase', default=True, action='store_true',
                                 help='Enable lowercasing on each word')
    lowercase_group.add_argument('--nolowercase', dest='lowercase', default=True, action='store_false',
                                 help='Disable lowercasing on each input word (default)')
    keep_nnp_group = parser.add_mutually_exclusive_group()
    keep_nnp_group.add_argument('--keep_nnp', dest='keep_nnp', default=False, action='store_true',
                                help=('Enable keeping words with POS NNP or NNPS intact, without stemming or '
                                      'lowercasing'))
    keep_nnp_group.add_argument('--nokeep_nnp', dest='keep_nnp', default=False, action='store_false',
                                help=('Disable keeping words with POS NNP or NNPS intact, without stemming or '
                                      'lowercasing (default)'))
    transliteration_group = parser.add_mutually_exclusive_group()
    transliteration_group.add_argument('--transliteration', dest='transliteration', default=True, action='store_true',
                                       help=('Enable transliteration on Unicode input (e.g., é into e, “ to ") '
                                             '(default)'))
    transliteration_group.add_argument('--notransliteration', dest='transliteration', default=True,
                                       action='store_false',
                                       help='Disable transliteration on Unicode input (e.g., é into e, “ to ")')
    logging_group = parser.add_mutually_exclusive_group()
    logging_group.add_argument('--logging', dest='logging', action='store_true', default=True,
                               help='Enable printing timing messages for each component (default)')
    logging_group.add_argument('--nologging', dest='logging', action='store_false', default=True,
                               help='Disable printing timing messages for each component')
    train_source = parser.add_mutually_exclusive_group(required=True)
    train_source.add_argument('--traindir', dest='traindir', action=FileAction, nargs='+',
                              metavar=('traindir', 'arff_output [vocab_output]'),
                              help=('Training data comes from texts in a directory. If arff_output is provided, the '
                                    'raw TF counts is written to arff_output. If vocab_output is also provided, the '
                                    'vocabulary is dumped into vocab_output'))
    train_source.add_argument('--trainarff', dest='trainarff', action=ArffAction, nargs=2,
                              metavar=('train.arff', 'vocabulary_file'),
                              help='Training data comes from ARFF file, with the specified vocabulary')
    train_source.add_argument('--trainpickle', dest='trainpickle', action=ArffAction, nargs=2,
                              metavar=('train.pickle', 'vocabulary_file'),
                              help='Training data comes from pickled csr_matrix file, with the specified vocabulary')
    test_source = parser.add_mutually_exclusive_group()
    test_source.add_argument('--testdir', dest='testdir', action=FileAction, nargs='+',
                             metavar=('testdir', 'arff_output'),
                             help='Test from a folder. If arff_output is provided the raw TF counts is written '
                                  'to arff_output.')
    # NOTE(review): the description mentions --testarff but no such option is
    # registered here -- confirm whether it was removed intentionally.
    return parser.parse_args(sys.argv[1:])
class KeyTermsExtractor(object):
"""Extract key concepts and key words from a given input text
This class is instantiated with a path to a training directory or training arff
**Parameters**
scorer_name : string, 'tfidf' by default
The scorers to be used to compute the term weight matrix.
Available options are:
* 'tf' : Plain term frequency.
* 'tfidf' : Standard TF-IDF term weighting scheme.
* 'midf' : Modified TF-IDF term weighting scheme.
* 'okapi' : Okapi BM25 term weighting scheme.
traindir : string, None by default
The directory containing training files.
This directory is expected to contain files which names represent the concept they contain.
See also traindir_arff and traindir_vocab for more information.
Either this option, trainarff, or trainpickle must be specified.
traindir_arff : string, optional, None by default
The filename to dump the ARFF file containing term frequency generated during training from traindir.
traindir_vocab : string, optional, None by default
The filename to dump the vocab file containing the vocabulary mapping generated during training from traindir.
trainarff : string, None by default
The ARFF file containing term frequency as the training data.
The ARFF file is expected to be compatible with `loadarff` method, that is, the last column is expected to be a
string attribute.
This option requires trainarff_vocab to be specified also.
See also trainarff_vocab.
Either this option, traindir, or trainpickle must be specified.
trainarff_vocab : string, required if trainarff is used, None by default
The vocab file containing pickled dictionary of vocabulary mapping.
trainpickle : string, None by default
The pickle file containing term frequency as the training data.
This option requires trainpickle_vocab to be specified also.
Either this option, traindir, or trainarff must be specified.
trainpickle_vocab : string, required if trainpickle is used, None by default
The vocab file containing pickled dictionary of vocabulary mapping.
testdir : string, optional, None
The directory containing files from which top concepts and words will be extracted.
If this option of testarff is specified, the result will be printed to console.
See also testdir_arff for more information.
testdir_arff : string, optional, None by default
The filename to dump the ARFF file containing term frequency generated from files in testdir
testarff : string, optional, None by default
The ARFF file containing term frequency as the test data.
The ARFF file is expected to be compatible with `loadarff` method, that is, the last column is expected to be a
string attribute.
testarff_vocab : string, optional, None by default
The vocab file containing pickled dictionary of vocabulary mapping.
This is optional since in normal cases, the vocabulary from training phase is the one to be used. The use of
this option will trigger synchronization of this vocabulary with the one generated during training.
This option is available in the case the test ARFF file is heavy to generate.
But for best result, please use testdir option.
lowercase : boolean, True by default
Whether to convert the input text into lowercase
keep_nnp : boolean, False by default
Whether to keep words with POS NNP and NNPS intact, that is, without converting it into lowercase or
stemming it.
transliteration : boolean, True by default
Whether to do transliteration on Unicode input. For example, it will convert LEFT DOUBLE QUOTATION MARK into
ASCII version double quotes
boost_method : int, 1 by default
The boosting method to be used:
* 0 : No boosting (i.e., core algorithm)
* 1 : Core + concept boosting
* 2 : Core + concept boosting with amplification
Read the documentation in https://wiki.knorex.asia/x/rAMYBQ
port : int, 8205 by default
The port number that will be used by `start_server` method to serve the extraction API
logging : boolean, True by default
Whether to print timing messages for each component in the code
**Attributes**
`forward_index_` : csr_matrix
The terms (rows) to concept definitions (columns) matrix
`inverted_index_` : csr_matrix
The transpose of `forward_index_`
`term_concept_index_` : csr_matrix
The terms (rows) to concept name (columns) matrix
`vocabulary_` : dict
The mapping from terms to indices
`mapping_` : dict
The inverse mapping of vocabulary
`concepts_` : list
The list of concept names
"""
##################
# Class constant #
##################
COTH1 = 1 / np.tanh(1)
def __init__(self, scorer_name='tfidf', traindir=None, traindir_arff=None, traindir_vocab=None,
trainarff=None, trainarff_vocab=None, testdir=None, testdir_arff=None,
trainpickle=None, trainpickle_vocab=None,
testarff=None, testarff_vocab=None, lowercase=True, keep_nnp=False, transliteration=True,
word_normalization='stem', boost_method=1, port=8205, logging=True):
for karg, value in locals().items():
setattr(self, karg, value)
if self.traindir is None and self.trainarff is None and self.trainpickle is None:
module_path = os.path.dirname(__file__)
self.trainarff = os.path.join(module_path, 'training_data/wikipedia_larger.arff')
self.trainarff_vocab = os.path.join(module_path, 'training_data/wikipedia_larger.vocab')
self.lowercase = True
self.keep_pos = False
self.transliteration = True
self.forward_index_ = None
self.inverted_index_ = None
self.term_concept_index_ = None
self.concepts_ = None
self.vocabulary_ = None
self.mapping_ = None
@property
def boost_method(self):
return self._boost_method
@boost_method.setter
def boost_method(self, value):
self._boost_method = value
if value == 1:
self.boost_concept = True
self.boost_lower = False
elif value == 2:
self.boost_concept = True
self.boost_lower = True
else:
self.boost_concept = False
self.boost_lower = False
@staticmethod
def gk_rank_similarity(input_rank, concept_rank):
"""Compare two rankings based on Goodman and Kruskal's rank correlation
"""
word_to_rank_input = dict((word, rank) for rank, word in enumerate(input_rank))
word_to_rank_concept = dict((word, rank) for rank, word in enumerate(concept_rank))
N_s = 0.0
N_d = 0.0
for word1, word2 in combinations(input_rank, 2):
order_input = np.sign(word_to_rank_input[word1] - word_to_rank_input[word2])
order_concept = np.sign(word_to_rank_concept[word1] - word_to_rank_concept[word2])
if order_input == order_concept:
N_s += 1.0
else:
N_d += 1.0
return (1 + ((N_s - N_d) / (N_s + N_d))) / 2
@staticmethod
def spearman_rank_similarity(input_rank, concept_rank):
"""Compare two rankings based on Spearman's rank correlation
"""
word_to_rank_input = dict((word, rank) for rank, word in enumerate(input_rank))
rank_diff = float(sum((rank - word_to_rank_input[word]) ** 2 for rank, word in enumerate(concept_rank)))
size = len(input_rank)
return 1 - (3 * rank_diff) / (size ** 3 - size)
#########################################
# Term weighting scheme helper function #
#########################################
def scorer_tf(self, doc_term_freq, concepts=None, use_existing_data=False):
return doc_term_freq
def scorer_tfidf(self, doc_term_freq, concepts=None, use_existing_data=False, norm='l2'):
if use_existing_data:
(doc_term, _) = tf_to_tfidf(doc_term_freq, idf_diag=self.idf_diag, sublinear_tf=True, smooth_idf=True,
norm=norm)
else:
(doc_term, idf_diag) = tf_to_tfidf(doc_term_freq, sublinear_tf=True, smooth_idf=True, norm=norm)
self.idf_diag = idf_diag
return doc_term
def scorer_okapi(self, doc_term_freq, concepts=None, use_existing_data=False, norm='l2'):
if use_existing_data:
(doc_term, _, _) = tf_to_okapi(doc_term_freq, idfs=self.idfs, avg_doc_len=self.avg_doc_len)
else:
(doc_term, idfs, avg_doc_len) = tf_to_okapi(doc_term_freq)
self.idfs = idfs
self.avg_doc_len = avg_doc_len
return doc_term
def scorer_midf(self, doc_term_freq, concepts=None, use_existing_data=False, norm='l2'):
(doc_term_freq_idf, ) = tf_to_midf(doc_term_freq)
return doc_term_freq_idf
##########################
# Initialization methods #
##########################
    def _forward_index_from_directory(self):
        """Build forward index from a directory.

        Tokenizes every file in self.traindir into a doc-term TF matrix,
        optionally dumps the TF counts (ARFF + pickle + file list) and the
        vocabulary, then applies the configured scorer and stores the
        forward index along with vocabulary/concept bookkeeping on self.
        """
        scorer_name = self.scorer_name
        traindir = self.traindir
        arff_output = self.traindir_arff
        vocab_output = self.traindir_vocab
        with Timing('Processing training files in the folder %s...' % traindir, self.logging):
            dtf = DocToFeature(lowercase=self.lowercase,
                               keep_nnp=self.keep_nnp,
                               transliteration=self.transliteration,
                               word_normalization=self.word_normalization)
            train_doc_term_freq = dtf.doc_to_tf(traindir)
            train_file_list = dtf.filelist
            vocabulary = dtf.vocabulary
            mapping = dtf.mapping
        # A training file's base name (underscores as spaces, '.txt'
        # removed) is the concept it defines.
        concepts = [filename[filename.rfind('/') + 1:].replace('_', ' ').replace('.txt', '')
                    for filename in train_file_list]
        if arff_output is not None:
            with Timing('Dumping TF counts to %s...' % arff_output, self.logging):
                docs_arff = FeatureToArff(train_doc_term_freq, relation='TF.IDF')
                docs_arff.add_column(concepts, name='concept', type_='string')
                docs_arff.dump(arff_output, sparse=True)
            pickle_output = '%s.pickle' % arff_output[:arff_output.rfind('.')]
            with Timing('Pickling TF counts to %s...' % pickle_output, self.logging):
                # Pickle in a child process so the dump's memory is released
                # when the process exits -- presumably to keep this (long
                # lived) process lean; confirm before simplifying.
                def task(item, _pickle_output):
                    with open(_pickle_output, 'wb') as outfile:
                        pickle.dump(item, outfile, protocol=2)
                process = mp.Process(target=task, args=((train_doc_term_freq, concepts), pickle_output))
                process.start()
                process.join()
            train_list_output = '%s.list' % arff_output[:arff_output.rfind('.')]
            with Timing('Writing file names of %s into %s...' % (traindir, train_list_output), self.logging):
                with(open(train_list_output, 'w')) as filename_output:
                    for filename in train_file_list:
                        filename_output.write(filename + '\n')
        if vocab_output is not None:
            with Timing('Dumping vocabulary to %s...' % vocab_output, self.logging):
                with open(vocab_output, 'w') as vocab_output_file:
                    pickle.dump(vocabulary, vocab_output_file, protocol=2)
        with Timing('Calculating feature scores using scorer %s...' % scorer_name, self.logging):
            forward_index = self.get_scorer(scorer_name)(train_doc_term_freq)
        self.forward_index_ = forward_index
        self.num_concepts_, self.num_features_ = forward_index.shape
        self.concepts_ = concepts
        self.vocabulary_ = vocabulary
        self.mapping_ = mapping
    def _forward_index_from_arff(self):
        """Build forward index from ARFF file.

        Loads TF counts from self.trainarff (caching them as a pickle next
        to the ARFF for faster subsequent loads), applies the configured
        scorer, and loads the matching vocabulary from self.trainarff_vocab.
        """
        scorer_name = self.scorer_name
        arff_file = self.trainarff
        vocab_file = self.trainarff_vocab
        with Timing('Loading and processing training data from %s using scorer %s...' % (arff_file, scorer_name),
                    self.logging):
            (train_doc_term_freq, concepts) = loadarff(arff_file)
            pickle_output = '%s.pickle' % arff_file[:arff_file.rfind('.')]
            if not os.path.exists(pickle_output):
                # Cache the parsed TF counts; pickling runs in a child
                # process so the dump's memory is released on exit.
                def task(item, _pickle_output):
                    with open(_pickle_output, 'wb') as outfile:
                        pickle.dump(item, outfile, protocol=2)
                process = mp.Process(target=task, args=((train_doc_term_freq, concepts), pickle_output))
                process.start()
                process.join()
            forward_index = self.get_scorer(scorer_name)(train_doc_term_freq)
        with Timing('Loading vocabulary from %s...' % vocab_file, self.logging):
            with open(vocab_file, 'rb') as infile:
                vocabulary = pickle.load(infile)
            # Inverse mapping: feature index -> word.
            mapping = {}
            for word, idx in vocabulary.iteritems():
                mapping[idx] = word
        self.forward_index_ = forward_index
        self.num_concepts_, self.num_features_ = forward_index.shape
        self.concepts_ = concepts
        self.vocabulary_ = vocabulary
        self.mapping_ = mapping
def _forward_index_from_pickle(self):
"""Build forward index from pickled csr_matrix"""
scorer_name = self.scorer_name
pickle_file = self.trainpickle
vocab_file = self.trainpickle_vocab
with Timing('Loading and processing training data from %s using scorer %s...' % (pickle_file, scorer_name),
self.logging):
with open(pickle_file, 'rb') as infile:
(train_doc_term_freq, concepts) = pickle.load(infile)
forward_index = self.get_scorer(scorer_name)(train_doc_term_freq)
with Timing('Loading vocabulary from %s...' % vocab_file, self.logging):
with open(vocab_file, 'rb') as vocab_file:
vocabulary = pickle.load(vocab_file)
mapping = {}
for word, idx in vocabulary.iteritems():
mapping[idx] = word
self.forward_index_ = forward_index
self.num_concepts_, self.num_features_ = forward_index.shape
self.concepts_ = concepts
self.vocabulary_ = vocabulary
self.mapping_ = mapping
    def _invert_index(self):
        """Invert the forward index and derive per-term informativeness scores."""
        forward_index = self.forward_index_
        with Timing('Inverting index... ', self.logging):
            inverted_index = forward_index.transpose(copy=True).tocsr()
            # Remove insignificant term-concept association
            #inverted_index.data[inverted_index.data<=1e-3] = 0
            self.inverted_index_ = inverted_index
        # Word informativeness based on:
        # http://www.ica.stc.sh.cn/picture/article/176/b8/e2/b5e4932249ec8284bb8a86866ec3/3b0d0bff-0e05-4d26-ba6f-85d15924594f.pdf
        # With corrected formula based on the description
        with Timing('Getting IDF scores...', self.logging):
            # Document frequency of each term = stored entries per CSR row.
            df = np.diff(inverted_index.indptr)
            idf = np.log(float(self.num_concepts_) / df)
            # Penalize terms whose document frequency strays from the
            # expected value exp_df; mul scales the penalty.
            mul = 1.1
            exp_df = 0.25*np.sqrt(self.num_concepts_)
            fw = mul*abs(np.log(exp_df/df))
            word_info = idf - fw
            # Rescale the informativeness scores into [0, 1].
            word_info = word_info-min(word_info)
            self.word_info_ = word_info/max(word_info)
    def _generate_term_concept_index(self):
        """Generate the term-to-concept-name index.

        Each concept NAME (not its definition text) is vectorized as if it
        were a one-line document, scored with the trained weighting scheme,
        and the result is transposed so rows are terms and columns concepts.
        Used by _interpret for concept boosting.
        """
        concepts = self.concepts_
        vocabulary = self.vocabulary_
        with Timing('Creating term-concept index...', self.logging):
            dtf = DocToFeature(lowercase=self.lowercase,
                               keep_nnp=self.keep_nnp,
                               transliteration=self.transliteration,
                               word_normalization=self.word_normalization)
            # concept_tf is the term count for each concept name, where each concept name is treated like one document
            concept_tf = dtf.str_to_tf(concepts, vocabulary=vocabulary)
            # concept_term_index is the normalized count
            concept_term_index = self.get_scorer(self.scorer_name)(concept_tf, use_existing_data=True, norm='l2')
            # term_concept_index is the transposed matrix from concept_term_index
            term_concept_index = concept_term_index.transpose(copy=False).tocsr()
        self.term_concept_index_ = term_concept_index
    def initialize(self):
        """Initialize the extractor

        Sets up the text-processing pipeline (tokenizer, NER tagger, NP
        chunker, Wikipedia MongoDB connection), builds the forward index
        from whichever training source was configured, then derives the
        inverted and term-concept indices.

        **Notes**
        When initializing from directory (i.e., with traindir specified), please be informed that the training might
        take a very long time, depending on the amount of training data.
        In Knorex working environment, CountVectorizer in scikit-learn has been modified to support multiprocessing,
        and so the initialization process can be faster. It's on the branch "parallel_vectorizer"
        Because of that, whenever the scikit-learn is updated, we need to make sure that the "knx_patch_mpcv"
        branch is still working.
        """
        with Timing('Initializing text processing components...', self.logging):
            self.dtf = DocToFeature(lowercase=self.lowercase,
                                    keep_nnp=self.keep_nnp,
                                    transliteration=self.transliteration,
                                    word_normalization=self.word_normalization)
        with Timing('Initializing ner-tagger component...', self.logging):
            self.ner_tagger = KnorexNERTagger()
        with Timing('Initializing np chunker component...', self.logging):
            self.np_chunker = MaxentNPChunker()
        with Timing('Connect to Wikipedia database...', self.logging):
            self.client = MongoClient('localhost', 27017)
            self.db = self.client['wikipedia']
            self.coll = self.db['TittleId']
        # Exactly one training source must have been configured.
        if self.traindir:
            self._forward_index_from_directory()
        elif self.trainarff:
            self._forward_index_from_arff()
        elif self.trainpickle:
            self._forward_index_from_pickle()
        else:
            raise Exception('No training directory or ARFF or pickle file has been specified')
        self._invert_index()
        self._generate_term_concept_index()
        # Free the temporaries created while building the indices.
        gc.collect()
def check_initialized(self):
if self.inverted_index_ is None:
raise Exception('Inverted index has not been built! Run initialize() first')
############################
# Batch extraction process #
############################
def extract_from_directory(self, dirname, n=10, with_score=False, extraction_output=None):
"""Extract top concepts and top words from the given directory
**Parameters**
dirname : string
The directory containing files that are to be extracted
n : int, optional, 10 by default
The number of concepts and words to be extracted
with_score : boolean, optional, False by default
Whether to return the score associated with each concept and word
extraction_output : string, optional, None by default
The file name to which the extraction output will be printed as JSON dump.
**Returns**
extraction_output : list
The extraction output will always be returned in a list of tuple, where each tuple contains:
top_concepts : list
This will be a list of strings if with_score=False is used,
otherwise it will be a list of (concept, score) tuple
top_phrases : list
This will be a list of strings if with_score=False is used,
otherwise it will be a list of (phrase, score) tuple
"""
if extraction_output is None:
if dirname.find('/') >= 0:
extraction_output = '%s.out' % dirname[dirname.rfind('/') + 1:]
else:
extraction_output = '%s.out' % dirname
with Timing('Processing test files in the folder %s...' % dirname, self.logging):
results = []
for filename in sorted(os.listdir(dirname), key=lambda x: x.lower()):
if filename == '.DS_Store':
continue
with open(os.path.join(dirname, filename), 'r') as infile:
text = infile.read()
title = filename[:(filename.rfind('.') + len(filename)) % len(filename)]
result = self.extract(text, title=title, n=n, with_score=with_score)
results.append((filename, result))
with Timing('Writing output to %s...' % extraction_output, self.logging):
with open(extraction_output, 'w') as outfile:
json.dump(results, outfile)
return results
######################
# Extraction methods #
######################
    def _interpret(self, test_doc_term, test_doc_tf=None, boost_concept=None, boost_lower=None):
        """Convert a term weight matrix into interpretation matrix

        The test_doc_tf is used for concept boosting

        **Parameters**
        test_doc_term : csr_matrix
            Term-weight row vector(s) of the input document(s)
        test_doc_tf : csr_matrix, optional
            Raw TF counts of the input; required when concept boosting is on
        boost_concept, boost_lower : boolean, optional
            Override self.boost_concept / self.boost_lower when provided
        """
        inverted_index = self.inverted_index_
        term_concept_index = self.term_concept_index_
        mapping = self.mapping_
        concepts = self.concepts_
        if boost_concept is None:
            boost_concept = self.boost_concept
        if boost_lower is None:
            boost_lower = self.boost_lower
        with Timing('Calculating interpretation vector...', self.logging):
            # Core ESA-style step: project the document onto concept space.
            interpretation_vector = test_doc_term * inverted_index
        if boost_concept:
            # NOTE(review): `None in [...]` invokes elementwise __eq__ on
            # sparse matrices with newer scipy; explicit `is None` checks
            # would be safer -- confirm the scipy version in use.
            if None in [term_concept_index, test_doc_tf]:
                LOGGER.warn('Concept boosting requested but either term_concept_index or test_docs_tf is not '
                            'available!')
            else:
                # docs_term_index is test_doc_tf being l2-normalized
                with Timing('Calculating term weight scores...', self.logging):
                    docs_term_index = self.get_scorer(self.scorer_name)(test_doc_tf, use_existing_data=True, norm='l2')
                with Timing('Calculating concept multiplier...', self.logging):
                    # Perform concept multiplier calculation for each concept:
                    # multiplier = 2^(1/sum (w_i.c'_i))
                    # with c'_i = tanh(1/(1-log(c_i)))/tanh(1) as the modified count
                    # where w_i is the weight of word i in concept matrix
                    # and c_i is the normalized count of word i in the document
                    concept_multiplier = docs_term_index * term_concept_index
                    if boost_lower:
                        concept_multiplier.data = self.COTH1 * np.tanh(1 / (1 - np.log(concept_multiplier.data ** 2)))
                    # The -1 is because this multiplier works as an addition to original matrix
                    # So later the multiplication can be done efficiently by using (or the equivalent):
                    # interpretation_vector += interpretation_vector.multiply(concept_multiplier)
                    concept_multiplier.data = np.exp2(concept_multiplier.data) - 1
                if DEBUG: # Debug process: print top 10 multipliers
                    docs_term_index_lil = docs_term_index.tolil()
                    top_concept_multiplier_indices = np.argsort(concept_multiplier.getrow(0).toarray()[0])[:-11:-1]
                    concept_multiplier_lil = concept_multiplier.tolil()
                    method_name = 'Core'
                    if boost_concept:
                        method_name = 'Core + Concept boost'
                    if boost_lower:
                        method_name = 'Core + Concept boost with amplification'
                    print 'Multipliers for %s:' % method_name
                    for idx in top_concept_multiplier_indices:
                        concept = concepts[idx]
                        nonzero_indices = term_concept_index.getcol(idx).nonzero()[0]
                        print '%s (%f): ' % (concept, concept_multiplier_lil[0, idx]),
                        for word_idx in nonzero_indices:
                            print '(%s, %f)' % (mapping[word_idx], docs_term_index_lil[0, word_idx]),
                        print
                    print
                with Timing('Multiplying coefficients...', self.logging):
                    # Sparse-friendly equivalent of elementwise multiplying
                    # by (1 + multiplier).
                    interpretation_vector = interpretation_vector + interpretation_vector.multiply(concept_multiplier)
        return interpretation_vector
def _take_top_phrases(self, interpretation_vector, test_doc_term, candidate_phrases, named_entities=[], n=10,
                      with_score=False, k=25, n_ranked=25, rank_sim='spearman_rank_similarity', text=None,
                      boost_ne = 0.15, max_phrase=0):
    """Return the top n concepts, words, and phrases for one document.

    interpretation_vector is expected to be a row vector as csr_matrix
    (1 x num_concepts).  test_doc_term is the document's term vector
    (1 x |V|, csr_matrix).  candidate_phrases is an iterable of phrase
    strings; named_entities is a list of NE strings used to boost terms.

    Pipeline: (1) sort concepts by score, (2) optionally rerank the top
    2k concepts by rank-similarity between the document's top terms and
    each concept's top terms, (3) derive a per-term affinity vector from
    the top-k concepts, (4) select terms/phrases with an ILP solver.

    Returns a (top_concepts, top_terms, top_phrases) triple; concepts
    carry scores only when with_score is True.
    """
    concepts = self.concepts_
    forward_index = self.forward_index_
    vocabulary = self.vocabulary_
    mapping = self.mapping_
    #num_features = self.num_features_
    #num_concepts = self.num_concepts_
    word_info = self.word_info_
    tokenizer, postprocessor = self.dtf.get_tokenizer(), self.dtf.get_postprocessor()
    tokenized = list(tokenizer(text))
    # Error checking to make sure that we pass the correct variable types
    if not isinstance(interpretation_vector, csr_matrix):
        raise TypeError('Expecting csr_matrix for interpretation_vector, got %s' % type(interpretation_vector))
    if interpretation_vector.shape[0] != 1:
        raise ValueError('Expecting a row vector, found a matrix with %d rows' % interpretation_vector.shape[0])
    if not isinstance(test_doc_term, csr_matrix):
        raise TypeError('Expecting csr_matrix for test_doc_term, got %s' % type(test_doc_term))
    if not isinstance(forward_index, csr_matrix):
        raise TypeError('Expecting csr_matrix for forward_index, got %s' % type(forward_index))
    with Timing('Sorting concepts...', self.logging):
        doc_score = interpretation_vector.toarray()[0]
        sorted_concept_indices = np.argsort(doc_score)
        # argsort is ascending, so slice from the back to get the top entries
        top_concept_indices = sorted_concept_indices[:-n - 1:-1]
        top_k_indices = sorted_concept_indices[:-k - 1:-1]
    if n_ranked > 0:
        # rank_sim names a static/class-level similarity function on KeyTermsExtractor
        rank_sim_func = getattr(KeyTermsExtractor, rank_sim)
        with Timing('Reranking top concepts...', self.logging):
            top_2k_indices = sorted_concept_indices[:-2 * k - 1:-1]
            # Find the top n_ranked terms in the input document
            word_indices_input = test_doc_term.indices[np.argsort(test_doc_term.data)[:-n_ranked - 1:-1]]
            word_indices_set_input = set(word_indices_input)
            test_doc_term = test_doc_term.tolil()
            concept_to_words = []
            #min_overlap = 2000
            #max_overlap = 0
            #sum_overlap = 0
            for concept_idx in top_2k_indices:
                concept_vector = forward_index.getrow(concept_idx).tolil()
                concept_vector_col_idx = np.array(concept_vector.rows[0])
                concept_vector_data = concept_vector.data[0]
                # Find the top n_ranked terms in the concept
                word_indices_concept = concept_vector_col_idx[np.argsort(concept_vector_data)[:-n_ranked - 1:-1]]
                word_indices_set_concept = set(word_indices_concept)
                # Combine the top terms in input and in concept
                word_indices_union = np.array(list(word_indices_set_input | word_indices_set_concept))
                # Gather overlap statistics for analysis purpose (non-essential)
                #overlap = len(word_indices_set_concept)+len(word_indices_set_input)-len(word_indices_union)
                #min_overlap = min(min_overlap, overlap)
                #max_overlap = max(max_overlap, overlap)
                #sum_overlap += overlap
                # Take the scores for each term in the combined list
                filtered_word_scores_input = test_doc_term[:, word_indices_union].toarray()[0]
                filtered_word_scores_concept = concept_vector[:, word_indices_union].toarray()[0]
                # The next four lines to get sorted list of term indices (i.e. the ranking)
                ranked_word_union_indices_input = np.argsort(filtered_word_scores_input)
                ranked_word_union_indices_concept = np.argsort(filtered_word_scores_concept)
                ranked_word_indices_input = word_indices_union[ranked_word_union_indices_input]
                ranked_word_indices_concept = word_indices_union[ranked_word_union_indices_concept]
                # The sorted list of term indices are then compared
                rank_similarity_score = rank_sim_func(ranked_word_indices_input, ranked_word_indices_concept)
                doc_score[concept_idx] *= rank_similarity_score
                if DEBUG:
                    words_input = [mapping[idx] for idx in ranked_word_indices_input]
                    words_concept = [mapping[idx] for idx in ranked_word_indices_concept]
                    concept_to_words.append([concept_idx, concepts[concept_idx], words_input, words_concept,
                                             rank_similarity_score, doc_score[concept_idx],
                                             doc_score[concept_idx] * rank_similarity_score])
            if DEBUG:
                from pprint import pprint
                pprint(concept_to_words)
            sorted_concept_indices = top_2k_indices[np.argsort(doc_score[top_2k_indices])]
            top_concept_indices = list(sorted_concept_indices[:-n - 1:-1])
            # NOTE(review): `sorted_concept_indices >= 1` is an elementwise
            # comparison, so len(...) always equals len(sorted_concept_indices).
            # Possibly `max(len(sorted_concept_indices), 1)` was intended — confirm.
            k = len(sorted_concept_indices >= 1)
            top_k_indices = list(sorted_concept_indices[:-k - 1:-1])
            #LOGGER.debug('Min overlaps: %d\nMax overlaps: %d\nAvg overlaps: %.2f' %
            #             (min_overlap, max_overlap, sum_overlap/(2.0*k)))
    with Timing('Sorting terms...', self.logging):
        # This part is explained in https://wiki.knorex.asia/x/rAMYBQ the "Key Words Extraction Part" section
        # Take top k concepts score from the interpretation vector (shape: 1 x k)
        top_k_concepts = csr_matrix(doc_score[top_k_indices])
        # Multiply each term in the top k concept vectors by term weight in the input text (shape: k x |V|)
        concept_word_matrix = forward_index[top_k_indices, :].multiply(scipy.sparse.vstack([test_doc_term] * k))
        # Find the maximum term score in each concept (shape: 1 x k)
        padded_data = np.pad(concept_word_matrix.data, (0, 1), 'constant', constant_values=0)
        scale = csr_matrix(np.maximum.reduceat(padded_data, concept_word_matrix.indptr[:-1]))
        # Find the normalizing constant for the top k concepts of the interpretation vector, multiply it to scale
        # Now scale contains the normalizing constant multiplied by the maximum term score in each concept
        scale = scale * top_k_concepts.sum(axis=1)[0, 0]
        # Invert the scale so that later division is just a multiplication with this scale
        scale.data = 1 / scale.data
        # Normalize the interpretation vector as well as divide each with the maximum term score of each concept
        # This completes step 3 (normalizing interpretation vector top_k_concepts) and prepare for step 2
        scale = scale.multiply(top_k_concepts)
        # When scale is multiplied (matrix multiplication) with the top k concept vectors, we are doing
        # step 2 and 4 simultaneously, resulting in a 1 x |V| vector containing the desired term score
        word_affinity_vector = scale * concept_word_matrix
        word_affinity_vector = word_affinity_vector.toarray()[0]
        top_terms_indices = [i for i in np.argsort(word_affinity_vector)[:-n - 1:-1]
                             if word_affinity_vector[i] > 0]
        # top_terms_indices = []
    def WordLength(tokens):
        # Count tokens that are not pure punctuation
        punct = '.,()&[]\'\"-/\\\n'
        res = 0
        for x in tokens:
            if x not in punct:
                res += 1
        return res
    def dist(word, tokenized):
        # Positional weight in [~ -0.5, 0.5): earlier words score lower
        pos = tokenized.index(word)
        return 0.5 - 1.0/(len(tokenized) - pos + 1)
    def calcMaxPhrases(lDoc):
        # Heuristic cap on the number of phrases the ILP may select,
        # scaled sub-linearly with the candidate count
        nPhrases = len(candidate_phrases)
        if nPhrases < 3:
            return nPhrases
        if nPhrases < 19:
            return nPhrases/3 + 2
        return int(round(nPhrases/math.log(nPhrases))) + 1
        # tokset = set()
        # for phrase in candidate_phrases:
        #     for x in tokenizer(phrase):
        #         tokset.add(postprocessor(x))
        # nToks = len(tokset)
        # return (nPhrases + nToks)/10, (nPhrases + nToks + lDoc)/20
    shortThres = 250
    #Update word_affinity_vector for short text
    lDoc = WordLength(tokenized)
    if lDoc < shortThres:
        tokenized = map(postprocessor, tokenized)
        tokenized = [token for token in tokenized if token in vocabulary]
        for word in set(tokenized):
            wid = vocabulary[word]
            word_affinity_vector[wid] = word_info[wid]*dist(word, tokenized)
    def ILPSolver(named_entities=[], regularizer=0.00, max_phrase=15, min_len=0.0, TOL=0.00001, w_ne=2.0,
                  postprocessor=postprocessor, tokenizer=tokenizer):
        # Select terms and phrases jointly: maximize total term score minus a
        # length regularizer, subject to phrase/term consistency constraints.
        def build_word_list(token_phrases):
            # Unique tokens over all candidate phrases
            res = []
            for x in token_phrases:
                res.extend(x)
            return list(set(res))
        def build_substr(token_phrases):
            # For each phrase, score how much it overlaps (substring /
            # shared prefix / shared suffix) with every other phrase
            def cal_prefix_score(l1, l2):
                len_l1, len_l2, res = len(l1), len(l2), 0.0
                for i, x in enumerate(l1):
                    if i == len_l2:
                        break
                    if x == l2[i]:
                        res += 1.0
                    else:
                        break
                return res/len_l1
            def cal_suffix_score(t1, t2):
                l1, l2 = list(reversed(t1)), list(reversed(t2))
                len_l1, len_l2, res = len(l1), len(l2), 0.0
                for i, x in enumerate(l1):
                    if i == len_l2:
                        break
                    if x == l2[i]:
                        res += 1.0
                    else:
                        break
                return res/len_l1
            res = []
            for x1, ls1 in enumerate(token_phrases):
                count = 0.0
                s1 = ' '.join(ls1)
                for x2, ls2 in enumerate(token_phrases):
                    if x1 != x2:
                        s2 = ' '.join(ls2)
                        if s2.find(s1) != -1 and len(s2) != 0:
                            count += float(len(ls1))/len(ls2)
                        elif s1.find(s2) == -1 and len(s1)!= 0:
                            count += cal_suffix_score(ls1, ls2)
                            count += cal_prefix_score(ls1, ls2)
                res.append(count)
            return res
        def build_ne_reg(phrases_list, named_entities):
            # w_ne for phrases containing a named entity, 0 otherwise
            res = []
            for phrase in phrases_list:
                tmp = 0.0
                for ne in named_entities:
                    if phrase.find(ne) != -1:
                        tmp = w_ne
                        break
                res.append(tmp)
            return res
        def build_occ_termphrase(TERMS, PHRASES):
            # term id -> list of phrase ids that contain it
            res = dict()
            for id_terms in TERMS:
                tmp = []
                for id_phrase in PHRASES:
                    if occ(id_terms, id_phrase) == 1:
                        tmp.append(id_phrase)
                res[id_terms] = tmp
            return res
        def occ(id_term, id_phrase):
            # term, phrase = mapping[id_term], token_phrases[id_phrase]
            term, phrase = word_map(id_term), token_phrases[id_phrase]
            if term in phrase:
                return 1
            return 0
        def length_phrase(id_phrase):
            tokens = token_phrases[id_phrase]
            return len(tokens)
        def cal_phrase_score(id_phrase):
            # Mean score of the phrase's selected terms, regularized by length.
            # NOTE(review): the return expression reads free variable `j`, not
            # `id_phrase`; it only works because Python 2 list comprehensions
            # leak their loop variable into the enclosing scope (see call site
            # below). Breaks under Python 3 — confirm before porting.
            score = 0.00
            for word in token_phrases[id_phrase]:
                wid = word_index(word)
                if wid == -1:
                    continue
                if term_vars[wid].varValue > TOL:
                    score += word_score(wid)
            score /= length_phrase(id_phrase)
            return abs(score - regularizer*(length_phrase(j) - min_len)/(1.0 + substr[j] + ne_reg[j]))
        def phrase_tokenize(phrase, tokenizer=None):
            if tokenizer:
                res = [x.strip('.,()&[]\'\"-/\\\n ') for x in tokenizer(phrase)]
            else:
                res = [x.strip('.,()&[]\'\"-/\\\n ') for x in phrase.split()]
            res = [x.replace(u'\n', u' ') for x in res if len(x) > 0]
            return [postprocessor(x) for x in res]
        def word_score(wordIdx):
            # Non-negative ids index the vocabulary; ids <= -2 are NE-only tokens
            if wordIdx >= 0:
                return word_affinity_vector[wordIdx]
            else:
                return ne_score[wordIdx]
        def word_index(word):
            if word in vocabulary:
                return vocabulary[word]
            if word in ne_vocab:
                return ne_vocab[word]
            return -1
        def word_map(wid):
            # Inverse of word_index; returns None for wid == -1 (unknown)
            if wid >= 0:
                return mapping[wid]
            if wid < -1:
                return ne_mapping[wid]
        def build_ne_word_score(named_entities, tokenizer=None):
            # Assign out-of-vocabulary NE tokens negative ids starting at -2,
            # with a boost score inversely proportional to the NE's length
            neVocab, neMap, neScore = {}, {}, {}
            idx = -2
            for named_entity in named_entities:
                tokens = phrase_tokenize(named_entity, tokenizer)
                boostScore = boost_ne*1.0/len(tokens)
                for token in tokens:
                    if token not in vocabulary:
                        if token not in neVocab:
                            neVocab[token] = idx
                            neMap[idx] = token
                            neScore[idx] = boostScore
                            idx -= 1
                        elif neScore[neVocab[token]] < boostScore:
                            neScore[neVocab[token]] = boostScore
                    # elif word_affinity_vector[vocabulary[token]] < TOL:
                    #     word_affinity_vector[vocabulary[token]] = boostScore
            return neVocab, neMap, neScore
        phrases_list = list(candidate_phrases)
        token_phrases = [phrase_tokenize(x, tokenizer) for x in phrases_list]
        word_list = build_word_list(token_phrases)
        substr = build_substr(token_phrases)
        ne_reg = build_ne_reg(phrases_list, named_entities)
        ne_vocab, ne_mapping, ne_score = build_ne_word_score(named_entities, tokenizer)
        TERMS = [word_index(word) for word in word_list if word_index(word) != -1]
        # TERMS = [vocabulary[word] for word in word_list if word in vocabulary] #word_id_list
        PHRASES = range(len(phrases_list))
        prob = LpProblem("TermPhrase", LpMaximize)
        term_vars = LpVariable.dicts("UseTerm", TERMS, 0, 1, LpBinary)
        phrase_vars = LpVariable.dicts("UsePhrase", PHRASES, 0, 1, LpBinary)
        # Objective: total selected-term score minus length penalty per phrase
        # prob += lpSum(term_vars[i]*word_affinity_vector[i] for i in TERMS) \
        prob += lpSum(term_vars[i]*word_score(i) for i in TERMS) \
            - regularizer*lpSum(phrase_vars[j]*(length_phrase(j) - min_len)/(1.0 + substr[j] + ne_reg[j])
                                for j in PHRASES)
        prob += lpSum(phrase_vars[j] for j in PHRASES) <= max_phrase
        # A phrase may be selected only if all its terms are selected
        for j in PHRASES:
            for i in TERMS:
                if occ(i,j) == 1:
                    prob += phrase_vars[j] <= term_vars[i]
        # A term may be selected only if at least one containing phrase is
        occ_termphrase = build_occ_termphrase(TERMS, PHRASES)
        for i in TERMS:
            prob += lpSum(phrase_vars[j] for j in occ_termphrase[i]) >= term_vars[i]
        prob.solve()
        # top_terms = [(mapping[i], word_affinity_vector[i]) for i in TERMS if term_vars[i].varValue > TOL]
        top_terms = [(word_map(i), word_score(i)) for i in TERMS if term_vars[i].varValue > TOL]
        top_phrases = [(phrases_list[j], cal_phrase_score(j)) for j in PHRASES if phrase_vars[j].varValue > TOL]
        return (sorted(top_terms, key=lambda x: x[1], reverse=True),
                sorted(top_phrases, key=lambda x: x[1], reverse=True))
    if max_phrase == 0:
        max_phrase = calcMaxPhrases(lDoc)
    with Timing('Solving ILP problem...', self.logging):
        top_terms, top_phrases = ILPSolver(named_entities=named_entities, regularizer=0.01, max_phrase=max_phrase)
    if with_score:
        top_concepts = [(concepts[i], doc_score[i]) for i in top_concept_indices]
    else:
        top_concepts = [concepts[i] for i in top_concept_indices]
        top_terms = [x[0] for x in top_terms]
        top_phrases = [x[0] for x in top_phrases]
    if len(top_terms) > n:
        top_terms = top_terms[:n]
    return (top_concepts, top_terms, top_phrases)
def extract_candidate_phrases(self, preprocessed_data, np_chunker=None, lemmatizer=None, named_entities=[]):
    """Returns list of phrases suitable as key words from the text.

    **Parameters**

    preprocessed_data : str
        The text which has been preprocessed (e.g., through get_preprocessor())
    np_chunker : knx.text.chunker.NPChunker
        The noun phrase chunker that will be used to find noun phrases in the text
    lemmatizer : knx.text.doc_to_feature.Lemmatizer
        The lemmatizer that will be used to determine whether two noun phrases are the same
    named_entities : list of (start, end, entity, type) tuples
        Named entities found in the text; used to protect NE spans from filtering/splitting

    **Returns**

    phrases : list of tuple: phrase, start index, end index in the orginal text
        The return value will just be list of noun phrases as string
    """
    def adjust_postags(chunked_phrases):
        # Retag alphanumeric-mix tokens (e.g. "3G") from CD to JJ, in place
        def alnum(word):
            return not word.isalpha() and not word.isdigit() and word.isalnum()
        for phrase in chunked_phrases:
            for idx, word_pos in enumerate(phrase):
                word, pos = word_pos
                if pos == 'CD' and alnum(word):
                    phrase[idx] = (word, 'JJ')
        return chunked_phrases
    def join_phrase(tokens):
        # Join tokens with spaces, except glue punctuation-like cases
        # ('US$', '$5', '5%', ',', short '&'-joined abbreviations)
        res, prev = str(), str()
        for token in tokens:
            if token == '$' and len(prev) == 2 and prev.isupper():
                res += token
            elif prev == '$' and len(token) > 0 and token[0].isdigit():
                res += token
            elif token == '%' and len(prev) > 0 and prev[0].isdigit():
                res += token
            elif token == ',':
                res += token
            elif token == '&' and len(prev) <= 2:
                res += token
            elif prev == '&' and len(token) <= 2:
                res += token
            else:
                res += (' ' + token if len(res) > 0 else token)
            prev = token
        return res
    def find_phrases(chunked_phrases):
        """
        Find the position of the phrases in the orginal text (preprocessed_data)
        """
        res = []
        prev = 0
        for chunked_phrase in chunked_phrases:
            phrase = join_phrase(zip(*chunked_phrase)[0])
            # phrase = ' '.join(zip(*chunked_phrase)[0])
            sid = preprocessed_data.find(phrase, prev)
            if sid != -1:
                prev = eid = sid + len(phrase)
            else:
                # Not found: mark end as -1 but still advance the cursor
                eid, prev = -1, prev + 1
            tmp = (chunked_phrase, sid, eid)
            res.append(tmp)
        return res
    def find_ne(position, ne_type=str()):
        # Return the NE tuple covering `position` (optionally of a given type),
        # or an empty tuple when none covers it
        for se, ee, named_entity, ne_type1 in named_entities:
            if se <= position < ee and (ne_type == ne_type1 or not ne_type):
                return (se, ee, named_entity, ne_type1)
        return tuple()
    def contain_CC(chunked_phrase):
        # Index of the first non-'&' coordinating conjunction, or -1
        for i, word_pos in enumerate(chunked_phrase):
            if word_pos[1] == 'CC' and word_pos[0] != '&':
                return i
        return -1
    def countAND(chunked_phrase):
        # Indices of literal 'and' tokens
        res = []
        for idx, word_pos in enumerate(chunked_phrase):
            if word_pos[0] == 'and':
                res.append(idx)
        return res
    def checkDT(phrase):
        # False (filter out) for bare "the/that/those/these/this + NN/NNS"
        chunked_phrase, sp, ep = phrase
        DTWords = {u'the', u'that', u'those', u'these', u'this'}
        if len(chunked_phrase) != 2 or chunked_phrase[0][1] != 'DT':
            return True
        if chunked_phrase[0][0].lower() not in DTWords or chunked_phrase[1][1] not in {'NNS', 'NN'}:
            return True
        return False
    if np_chunker is None:
        np_chunker = self.np_chunker
    if lemmatizer is None:
        lemmatizer = self.dtf.lemmatizer
    chunked_phrases = np_chunker.chunk(preprocessed_data, sent_tokenized=False, output_tags=True, split_words=True)
    chunked_phrases = adjust_postags(chunked_phrases)
    chunked_phrases = find_phrases(chunked_phrases)
    def NPString(np, with_pos_tag=True):
        # Debug/helper formatting of a chunk as "word/POS ..." or plain words
        if with_pos_tag:
            return ' '.join([x[0] + u'/' + x[1] for x in np])
        else:
            return ' '.join(zip(*np)[0])
    #Remove meaningless words if they are not included in any named entities
    MEANINGLESS_WORDS = {u'other', u'others', u'such', u'many', u'any', u'etc', u'e.g', u'much',u'someone',
                         u'anyone', u'someelse', u'anything', u'something', u'nothing', u'everyone', u'everything'}
    new_chunked_phrases = []
    for chunked_phrase, sp, ep in chunked_phrases:
        if sp == -1:
            continue
        tmp = chunked_phrase[:]
        chunked_phrase[:] = []
        reduce_id = []
        for idx, word_pos in enumerate(tmp):
            word, pos = word_pos
            if word.lower() not in MEANINGLESS_WORDS or find_ne(sp):
                chunked_phrase.append((word,pos))
            else:
                reduce_id.append(idx)
        # update_seid is defined elsewhere in this module; presumably it shifts
        # the (start, end) span to account for the removed tokens — confirm
        sp, ep = update_seid(tmp, reduce_id, sp, ep)
        if chunked_phrase:
            new_chunked_phrases.append((chunked_phrase, sp, ep))
    chunked_phrases = new_chunked_phrases
    # Eliminate some phrases with pattern: "the/that/those/these/this + NN/NNS"
    chunked_phrases = filter(checkDT,chunked_phrases)
    # Adjust phrases ended with &/CC
    tmp = chunked_phrases
    chunked_phrases = []
    lphrases, flag = len(tmp), True
    for i, phrase in enumerate(tmp):
        chunked_phrase, sp, ep = phrase
        if not flag or not chunked_phrase:
            # flag False means this chunk was already merged into the previous one
            flag = True
            continue
        if chunked_phrase[-1] == ('&', 'CC') and i + 1 < lphrases:
            nchunked_phrase, nsp, nep = tmp[i + 1]
            if nchunked_phrase[0][1] == 'NNP' and ep == nsp:
                chunked_phrase.extend(nchunked_phrase)
                ep = nep
                flag = False
        chunked_phrases.append((chunked_phrase, sp, ep))
    # Seperate phrases containing two or more conjunctions
    tmp = chunked_phrases
    chunked_phrases = []
    for chunked_phrase, sp, ep in tmp:
        if len(countAND(chunked_phrase)) > 0:
            prev, nsp, nep = 0, sp, sp
            for i, word_pos in enumerate(chunked_phrase):
                word, pos = word_pos
                # NOTE(review): countAND matches lowercase 'and' but this test
                # compares against 'AND', so the split below may never trigger;
                # also the segment after the last conjunction is never appended.
                # Both look like defects — confirm intended casing/behavior.
                if word == 'AND' and not find_ne(nep) and i - prev > 0:
                    chunked_phrases.append((chunked_phrase[prev:i], nsp, nep - 1))
                    nsp = nep + len(word) + 1
                    prev = i + 1
                nep += len(word) + 1
        else:
            chunked_phrases.append((chunked_phrase, sp, ep))
    # Filter and modify the noun phrases for suitable KTE outputs
    chunked_phrases = filter_noun_phrases(chunked_phrases, lemmatizer)
    # Remove percentage named entities
    tmp = chunked_phrases
    chunked_phrases = []
    for chunked_phrase, sp, ep in tmp:
        ne = find_ne(sp, "Percentage")
        if ne and ne[0] <= sp < ne[1]:
            # Walk past the tokens covered by the Percentage NE
            flag = False
            for idx, word_pos in enumerate(chunked_phrase):
                sp += len(word_pos[0]) + 1
                if sp >= ne[1]:
                    flag = True
                    break
            if flag:
                if idx != len(chunked_phrase) - 1:
                    # Keep only the tail that lies beyond the NE
                    chunked_phrases.append((chunked_phrase[idx + 1:], sp, ep))
            else:
                chunked_phrases.append((chunked_phrase, sp, ep))
        else:
            chunked_phrases.append((chunked_phrase, sp, ep))
    # Seperate some phrases containing conjunction
    tmp = chunked_phrases
    chunked_phrases = []
    for chunked_phrase, sp, ep in tmp:
        if sp == -1:
            continue
        idx = contain_CC(chunked_phrase)
        flag = True
        if idx != -1 and idx != len(chunked_phrase):
            if not find_ne(sp):
                # "X and Y" with identical POS on both sides -> two phrases
                pos_str = ' '.join(zip(*chunked_phrase)[1])
                if pos_str in {'NNP CC NNP', 'NN CC NN', 'NNS CC NNS', 'NNPS CC NNPS'}:
                    flag = False
                    chunked_phrases.append(([chunked_phrase[0]], sp, sp + len(chunked_phrase[0][0])))
                    chunked_phrases.append(([chunked_phrase[2]], ep - len(chunked_phrase[2][0]), ep))
            else:
                # Inside an NE: split only when both sides are NEs but the
                # conjunction itself is not part of any NE
                sp2 = sp
                for i in range(idx + 1):
                    sp2 += len(chunked_phrase[i][0]) + 1
                spcc = sp2 - len(chunked_phrase[idx][0]) - 1
                if find_ne(sp2) and find_ne(ep - 1) and not find_ne(spcc):
                    flag = False
                    chunked_phrases.append((chunked_phrase[0:idx], sp, spcc - 1))
                    chunked_phrases.append((chunked_phrase[idx+1:], sp2, ep))
        if flag:
            chunked_phrases.append((chunked_phrase, sp, ep))
    phrases = []
    for chunked_phrase, sp, ep in chunked_phrases:
        if not chunked_phrase:
            continue
        phrase = join_phrase(zip(*chunked_phrase)[0])
        # phrase = ' '.join(zip(*chunked_phrase)[0])
        phrases.append((phrase, sp, ep))
    return phrases
def get_first_paragraph(self, preprocessed_data):
    """Returns the (approximately) first paragraph from the preprocessed data.

    Looks for a newline after position 25; if none is found (or it lies
    beyond position 500), falls back to the first sentence boundary
    ('. ') after position 75.  Returns '' when neither boundary exists.

    **Parameters**

    preprocessed_data : str
        The preprocessed document text

    **Returns**

    str : the first paragraph, possibly extended to the next period
    """
    first_idx = preprocessed_data.find('\n', 25)
    if first_idx == -1 or first_idx > 500:
        # Fall back to a sentence boundary.  Bug fix: test for -1 BEFORE
        # adding 1 -- the original computed find(...) + 1 first, so a failed
        # find yielded index 0 and the early `return ''` was unreachable.
        first_idx = preprocessed_data.find('. ', 75)
        if first_idx == -1:
            return ''
        first_idx += 1
    if '.' not in preprocessed_data[first_idx - 4:first_idx]:
        # Cut point does not end near a period; extend to the next '.'
        tmp = preprocessed_data.find('.', first_idx)
        if tmp != -1:
            first_idx = tmp
    return preprocessed_data[:first_idx]
def extract(self, data, title='', n=10, with_score=False, k=25, n_ranked=25, boost_concept=None, boost_lower=None,
            rank_sim='spearman_rank_similarity', return_values=['concepts', 'terms', 'phrases'],
            ner_tagger=None, np_chunker=None, lemmatizer=None, boost_ne=0.15):
    """Extract top concepts and phrases from a document string

    **Parameters**

    data : string
        The document from which concepts and phrases should be extracted
    title : string
        The title of the document. This is used to extract important information often found in titles
    n : int, optional, 10 by default
        The number of top concepts and phrases to be extracted
    with_score : boolean, optional, False by default
        Whether to include the scores for each concept and phrase
    k : int, optional, 25 by default
        The parameter that controls the number of concepts should affect the phrase scoring
    n_ranked : int, optional, 25 by default
        The parameter that controls how many top words in each concept will be considered when reranking
    boost_concept : boolean, optional
        Whether to boost concept scores.
        Will use the value according to the boost_method property if not provided
    boost_lower : boolean, optional
        Whether to amplify lower scores, only applicable if boost_concept is True
        Will use the value according to the boost_method property if not provided
    rank_sim : str, one of {'gk_rank_similarity', 'spearman_rank_similarity'}, default to 'spearman_rank_similarity'
        The rank similarity measure to rerank the concepts
    return_values : collection of str in {'concepts', 'terms', 'phrases'}
        The values that will be returned.
        concepts : list of top concepts
        terms : list of top terms
        phrases : list of top phrases
    ner_tagger : a ner_tagger object, optional
        An object with the method `processDocument` that accepts a string and produce an XML
    np_chunker : an NPChunker object, optional
        knx.text.chunker.NPChunker object to do the noun phrase chunking
    lemmatizer : a Lemmatizer object, optional
        An object with the method `lemmatize` that accepts a word and an optional POS tag,
        and produces the lemma of the specified word
    boost_ne : float, optional, 0.15 by default
        Score boost applied to named-entity tokens during phrase selection

    **Returns**

    top_values : dict
        The returned value will be a dictionary containing any combination of these mappings, depending on the
        `return_values` argument:
        'concepts' -> top_concepts : list
            This will be a list of concepts if with_score=False is used,
            otherwise it will be a list of (concept, score) tuple
        'terms' -> top_terms : list
            This will be a list of terms if with_score=False is used,
            otherwise it will be a list of (term, score) tuple
        'phrases' -> top_phrases : list
            This will be a list of phrases if with_score=False is used,
            otherwise it will be a list of (phrase, score) tuple
    """
    def match_ne(phrase, named_entity):
        # True when one string contains the other and lengths differ by < 3
        if ((phrase in named_entity or named_entity in phrase)
                and abs(len(phrase) - len(named_entity)) < 3):
            return True
        else:
            return False
    if ner_tagger is None:
        ner_tagger = self.ner_tagger
    if np_chunker is None:
        np_chunker = self.np_chunker
    if lemmatizer is None:
        lemmatizer = self.dtf.lemmatizer
    if boost_concept is None:
        boost_concept = self.boost_concept
    if boost_lower is None:
        boost_lower = self.boost_lower
    with Timing('Converting input into matrix...', self.logging):
        # vector_tf = self.dtf.str_to_tf(data.replace('-',' '), self.vocabulary_)
        vector_tf = self.dtf.str_to_tf(data, self.vocabulary_)
        # LOGGER.info('%s',vector_tf)
    with Timing('Preprocessing the input text...', self.logging):
        preprocessed = self.dtf.get_preprocessor()(data)
        # LOGGER.info('%s',preprocessed)
    with Timing('Boosting title and first paragraph...', self.logging):
        # first_para = self.get_first_paragraph(preprocessed)
        # summary = title + ' . ' + first_para
        summary = title + ' . ' + preprocessed
        # Double (plus one) the tf weight of every title word present in the doc
        for word in self.dtf.get_analyzer()(title):
            word_idx = np.argwhere(vector_tf.indices == self.vocabulary_.get(word, None))
            if word_idx:
                vector_tf.data[word_idx] = vector_tf.data[word_idx] * 2 + 1
    with Timing('Tagging named entities...', self.logging):
        banned_type = {u'Currency',u'Percentage', u'Date', u'Time', u'Position', u'Malaysian_title'}
        if ner_tagger is not None:
            # Offsets from the tagger are relative to `summary`; shift them
            # back so they index into `preprocessed`
            bias = len(title + ' . ')
            nes, tmpdupnes = ner_tagger.tag(summary)
            full_named_entities = [(x[0] - bias, x[1] - bias, x[2], x[3]) for x in nes]
            dupnes = []
            for y in tmpdupnes:
                tmp = [(x[0] - bias, x[1] - bias, x[2], x[3]) for x in y]
                dupnes.append(tmp)
            named_entities = [x[2] for x in full_named_entities if x[3] not in banned_type]
            named_entities = list(set(named_entities))
            banned_named_entities = [x[2] for x in full_named_entities if x[3] in banned_type]
            banned_named_entities = list(set(banned_named_entities))
        else:
            # NOTE(review): `dupnes` is NOT initialized on this branch but is
            # read later in get_ne_id — a NameError if ner_tagger is None and
            # candidate phrases survive to that stage. Confirm and fix.
            named_entities = banned_named_entities = full_named_entities = []
            if self.logging:
                LOGGER.warn('Not running ner tagger')
    with Timing('Extracting candidate phrases...', self.logging):
        candidate_phrases = self.extract_candidate_phrases(preprocessed, np_chunker, lemmatizer,
                                                           full_named_entities)
        # LOGGER.info('%s',candidate_phrases)
    with Timing('Processing noun phrases with named entities...', self.logging):
        #Correct noun phrases containing incompleted named entities
        tmp = candidate_phrases[:]
        candidate_phrases = []
        for phrase, sp, ep in tmp:
            found = False
            for se, ee, named_entity, ne_type in full_named_entities:
                if se < sp < ee < ep:
                    # NE starts before the phrase: prepend the missing prefix
                    found = True
                    new_phrase = preprocessed[se:sp] + phrase
                    candidate_phrases.append((new_phrase, se, ep))
                    break
                if sp < se < ep < ee:
                    # NE runs past the phrase: append the missing suffix
                    found = True
                    new_phrase = phrase + preprocessed[ep:ee]
                    candidate_phrases.append((new_phrase, sp, ee))
                    break
                if se <= sp < ep <= ee:
                    # Phrase fully inside the NE: replace with the full NE
                    found = True
                    candidate_phrases.append((named_entity, se, ee))
                    break
            if not found:
                candidate_phrases.append((phrase, sp, ep))
        #Eliminating some noun phrases (Percentage, Currency ...) standing alone
        tmp = candidate_phrases[:]
        candidate_phrases = []
        for phrase in tmp:
            found = False
            for named_entity in banned_named_entities:
                if match_ne(phrase[0], named_entity):
                    found = True
                    break
            if not found:
                candidate_phrases.append(phrase)
        #Adding some named entities not belong to any phrases
        # NOTE(review): this loop is dead/broken: `found` starts at True so
        # the `if not found` branch never runs; candidate_phrases is a list
        # (no .add); and `se`/`ee` are stale values from the earlier loop.
        # Confirm intended behavior before fixing.
        for named_entity in named_entities:
            found = True
            for phrase, sp, ep in candidate_phrases:
                if named_entity in phrase:
                    found = True
                    break
            if not found:
                candidate_phrases.add((named_entity, se, ee))
    with Timing('Eliminate nearly duplicated noun phrases based on morphological features...', self.logging):
        def compare(phrase1, phrase2):
            # 1 if phrase1 is the preferred variant, -1 if phrase2 is,
            # 0 when the phrases are not near-duplicates
            def compare1(token1, token2):
                # Morphological preference: plural/suffixed form wins;
                # -2 signals tokens are unrelated
                t1, t2 = token1.lower(), token2.lower()
                if t1[-1] in 's.,' and t1[0:-1] == t2: return 1
                if t1[-2:] == 'es':
                    if t1[:-2] == t2: return 1
                    if t1[:-3] == t2[:-1] and t1[-3] == 'i' and t2[-1] == 'y': return 1
                if t2[-1] in 's.,' and t2[0:-1] == t1: return -1
                if t2[-2:] == 'es':
                    if t2[:-2] == t1: return -1
                    if t2[:-3] == t1[:-1] and t2[-3] == 'i' and t1[-1] == 'y': return -1
                if t1 != t2:
                    return -2
                return 0
            def compare2(token1, token2):
                # Capitalization preference: capitalized/uppercase form wins
                if token1 == token2:
                    return 0
                if token1[0].isupper() and token2[0].islower():
                    return 1
                if token2[0].isupper() and token1[0].islower():
                    return -1
                if token1.isupper() and (token2.islower() or token2.istitle()):
                    return 1
                if token2.isupper() and (token1.islower() or token1.istitle()):
                    return -1
                return 0
            lphrase1, lphrase2 = phrase1.split(), phrase2.split()
            if len(lphrase1) != len(lphrase2):
                return 0
            s1 = s2 = 0
            for i in range(len(lphrase1)):
                cmp1 = compare1(lphrase1[i], lphrase2[i])
                if cmp1 == -2:
                    return 0
                cmp2 = compare2(lphrase1[i], lphrase2[i])
                s1 += cmp1
                s2 += cmp2
            if s1 < 0:
                return -1
            elif s1 > 0:
                return 1
            if s2 < 0:
                return -1
            elif s2 > 0:
                return 1
            return 0
        flag = [True]*len(candidate_phrases)
        for id1, phrase1 in enumerate(candidate_phrases):
            if not flag[id1]:
                continue
            for id2, phrase2 in enumerate(candidate_phrases):
                if id1 >= id2:
                    continue
                t_cmp = compare(phrase1[0], phrase2[0])
                if t_cmp == -1:
                    flag[id1] = False
                    break
                elif t_cmp == 1:
                    flag[id2] = False
        candidate_phrases = [x for i, x in enumerate(candidate_phrases) if flag[i]]
    with Timing('Eliminate duplicated named enitites...', self.logging):
        def get_ne_id(phrases):
            # Map each (phrase, sid, eid) to the (list index, NE index) of the
            # duplicate-NE group covering its start, or (-1, -1) if none
            def find_ne(sid, named_entities):
                for idx, ne in enumerate(named_entities):
                    se, ee, named_entity, ne_type = ne
                    if se <= sid < ee:
                        return idx
                return -1
            res = dict()
            for phrase, sid, eid in phrases:
                flag = True
                for lidx, named_entities in enumerate(dupnes):
                    idx = find_ne(sid, named_entities)
                    if idx != -1:
                        res[phrase, sid, eid] = (lidx, idx)
                        flag = False
                        break
                if flag:
                    res[phrase, sid, eid] = (-1, -1)
            return res
        def compare_ne(phrase1, phrase2):
            # Prefer the longer phrase when the two differ only in which
            # variant of the same duplicated NE they contain
            p1, sid1, eid1 = phrase1
            p2, sid2, eid2 = phrase2
            if sid1 == -1 or sid2 == -1:
                return 0
            neidx1, neidx2 = ne_list_id[phrase1], ne_list_id[phrase2]
            if neidx1[0] != -1:
                ne1 = dupnes[neidx1[0]][neidx1[1]]
                if neidx2[0] == -1:
                    return 0
                ne2 = dupnes[neidx2[0]][neidx2[1]]
                if ne1[2] == ne2[2]:
                    return 0
                sidne1, eidne1 = ne1[0] - sid1, ne1[1] - sid1
                sidne2, eidne2 = ne2[0] - sid2, ne2[1] - sid2
                if p1[0:sidne1] == p2[0:sidne2] and p1[eidne1:] == p2[eidne2:]:
                    if len(p1) > len(p2):
                        return 1
                    elif len(p1) < len(p2):
                        return -1
            return 0
        ne_list_id = get_ne_id(candidate_phrases)
        flag = [True]*len(candidate_phrases)
        for id1, phrase1 in enumerate(candidate_phrases):
            if not flag[id1]:
                continue
            for id2, phrase2 in enumerate(candidate_phrases):
                if id1 >= id2:
                    continue
                cmp_ne = compare_ne(phrase1, phrase2)
                if cmp_ne == -1:
                    flag[id1] = False
                    break
                elif cmp_ne == 1:
                    flag[id2] = False
        candidate_phrases = [x for i, x in enumerate(candidate_phrases) if flag[i]]
    with Timing('Eliminate based on Wikipedia direct machanism...', self.logging):
        #Eliminating duplicated noun phrases by creating a set of phrases (set of strings)
        tmp = set()
        for phrase, sid, eid in candidate_phrases:
            tmp.add(phrase)
        def get_wiki_id(candidate_phrases):
            # Look up each phrase's Wikipedia redirect id in self.coll
            # (a MongoDB-like collection); empty string when not found
            res = dict()
            for s in candidate_phrases:
                s1 = s.upper().replace(u' ',u'_')
                f = self.coll.find_one({'title': s1}, {'_id': False, 'title': False})
                if f == None:
                    res[s] = unicode()
                else:
                    res[s] = f[u'id']
            return res
        def compare_wiki_direct(phrase1, phrase2):
            # Phrases redirecting to the same Wikipedia page: keep the longer
            wiki_id1, wiki_id2 = wiki_id[phrase1], wiki_id[phrase2]
            if wiki_id1 == str() or wiki_id2 == str():
                return 0
            if wiki_id1 == wiki_id2:
                if len(phrase1) > len(phrase2):
                    return 1
                elif len(phrase1) < len(phrase2):
                    return -1
            return 0
        wiki_id = get_wiki_id(tmp)
        flag = [True]*len(tmp)
        for id1, phrase1 in enumerate(tmp):
            if not flag[id1]:
                continue
            for id2, phrase2 in enumerate(tmp):
                if id1 >= id2:
                    continue
                cmp_wiki = compare_wiki_direct(phrase1, phrase2)
                if cmp_wiki == -1:
                    flag[id1] = False
                    break
                elif cmp_wiki == 1:
                    flag[id2] = False
        candidate_phrases = [x for i, x in enumerate(tmp) if flag[i]]
        # Drop overly long phrases (more than 9 words)
        candidate_phrases = [x for x in candidate_phrases if len(x.split()) <= 9]
    with Timing('Scoring each term in the matrix and calculating interpretation vector...', self.logging):
        vector = self.get_scorer(self.scorer_name)(vector_tf, use_existing_data=True)
        interpretation_vect = self._interpret(vector, test_doc_tf=vector_tf,
                                              boost_concept=boost_concept, boost_lower=boost_lower)
    if DEBUG:
        with Timing('Drawing data graph...', self.logging):
            draw_vector(interpretation_vect)
    top_concepts, top_terms, top_phrases = self._take_top_phrases(interpretation_vect,
                                                                  test_doc_term=vector,
                                                                  candidate_phrases=candidate_phrases,
                                                                  named_entities=named_entities,
                                                                  n=n, with_score=with_score,
                                                                  k=k, n_ranked=n_ranked,
                                                                  rank_sim=rank_sim, text=preprocessed,
                                                                  boost_ne=boost_ne)
    result = dict()
    if 'concepts' in return_values:
        result['concepts'] = top_concepts
    if 'terms' in return_values:
        result['terms'] = top_terms
    if 'phrases' in return_values:
        result['phrases'] = top_phrases
    return result
def extract_batch(self, doc_list, title='', n=10, with_score=False, k=25, n_ranked=25,
                  return_values=['concepts', 'terms', 'phrases'],
                  boost_concept=None, boost_lower=None, rank_sim='spearman_rank_similarity'):
    """Extract top concepts, terms, or phrases from a list of documents

    **Parameters**

    doc_list : list
        The list of strings from which concepts and phrases should be extracted
    title : string
        The title of the document. This is used to extract important information often found in titles
    n : int, optional, 10 by default
        The number of top concepts and phrases to be extracted
    with_score : boolean, optional, False by default
        Whether to include the scores for each concept and phrase
    k : int, optional, 25 by default
        The parameter that controls the number of concepts should affect the phrase scoring
    n_ranked : int, optional, 25 by default
        The parameter that controls how many top words in each concept will be considered when reranking
    return_values : collection of str in {'concepts', 'terms', 'phrases'}
        The values that will be returned.
        concepts : list of top concepts
        terms : list of top terms
        phrases : list of top phrases
    boost_concept : boolean, optional
        Whether to boost concept scores.
        Will use the value according to the boost_method property if not provided
    boost_lower : boolean, optional
        Whether to amplify lower scores, only applicable if boost_concept is True
        Will use the value according to the boost_method property if not provided
    rank_sim : str, optional
        The rank similarity measure to rerank the concepts

    **Returns**

    result : list of dict
        The returned value will be a list of dictionaries, where each tuple is the extraction result from one
        document, in the same order as given.
        Each dictionary will contain any combination of these mappings, depending on the `return_values` argument:
        'concepts' -> top_concepts : list
            This will be a list of concepts if with_score=False is used,
            otherwise it will be a list of (concept, score) tuple
        'terms' -> top_terms : list
            This will be a list of terms if with_score=False is used,
            otherwise it will be a list of (term, score) tuple
        'phrases' -> top_phrases : list
            This will be a list of phrases if with_score=False is used,
            otherwise it will be a list of (phrase, score) tuple
    """
    # Silence per-document timing logs during the batch; the try/finally
    # guarantees self.logging is restored even if extraction raises,
    # otherwise one bad document would permanently disable logging.
    old_logging = self.logging
    self.logging = False
    try:
        with Timing('Extracting top concepts and phrases from list of texts...', old_logging):
            result = [self.extract(data, title=title, n=n, with_score=with_score, k=k, n_ranked=n_ranked,
                                   return_values=return_values, boost_concept=boost_concept,
                                   boost_lower=boost_lower, rank_sim=rank_sim) for data in doc_list]
    finally:
        self.logging = old_logging
    return result
#####################
# General utilities #
#####################
def start_server(self, basedir=os.path.dirname(__file__), index_file='static/extract.html', n_jobs=1):
    """Start a web server serving the extraction API at /extract

    **Parameters**

    basedir : string, optional, defaults to current directory
        The base directory to serve static files.
        If None, no static files will be served and no worker processes
        are started.
    index_file : string, optional, defaults to "static/extract.html"
        The page to be served as interface.
    n_jobs : int, optional, defaults to 1
        The number of processes to run. This determines the number of
        concurrent requests that can be handled simultaneously.
        Non-positive values mean ``cpu_count() + n_jobs``.
    """
    self.check_initialized()

    def _extract_process(extractor, pipe, ner_tagger, np_chunker):
        # Worker loop: receive extraction parameters over the pipe, run the
        # extraction, send the result back. Runs until the parent process
        # terminates this worker.
        while True:
            params = pipe.recv()
            try:
                result = extractor.extract(ner_tagger=ner_tagger, np_chunker=np_chunker, **params)
            except Exception:
                LOGGER.error('Title: %s\nText: %s' % (params['title'], params['data']), exc_info=True)
                result = {}
            pipe.send(result)

    # Bug fix: initialize these before the branch. Previously `workers` was
    # only bound inside the `basedir is not None` branch, so the
    # KeyboardInterrupt cleanup below raised NameError when basedir was None.
    parent_pipes = []
    workers = []
    if basedir is not None:
        if n_jobs < 1:
            n_jobs += mp.cpu_count()
        for i in xrange(n_jobs):
            parent_pipe, child_pipe = mp.Pipe()
            # One semaphore per pipe: handler threads acquire it to get
            # exclusive use of a worker's pipe.
            semaphore = threading.Semaphore()
            if self.ner_tagger is not None:
                ner_tagger = KnorexNERTagger()
                # ner_gateway = JavaGateway(GatewayClient(port=48100), auto_convert=True, auto_field=True)
                # ner_tagger = ner_gateway.entry_point
            else:
                ner_tagger = None
            np_chunker = MaxentNPChunker()
            worker = mp.Process(target=_extract_process, args=(self, child_pipe, ner_tagger, np_chunker))
            worker.start()
            workers.append(worker)
            parent_pipes.append((parent_pipe, semaphore))
        application = Application([
            (r'/', MainHandler, {'index_file': index_file}),
            (r'/version', VersionHandler),
            (r'/extract', ConceptExtractorHandler, {'pipes': parent_pipes}),
            (r'/static/(.*)', StaticFileHandler, {'path': os.path.join(basedir, 'static')}),
        ])
    else:
        # NOTE(review): ConceptExtractorHandler.initialize() accepts `pipes`,
        # but this branch passes `extractor` — confirm this code path works.
        application = Application([
            (r'/extract', ConceptExtractorHandler, {'extractor': self}),
        ])
    application.listen(self.port)
    print('Server started at port %d' % self.port)
    try:
        IOLoop.instance().start()
    except KeyboardInterrupt:
        # Give each worker a moment to finish, then force-terminate it.
        for worker in workers:
            worker.join(timeout=1)
            worker.terminate()
            worker.join()
def get_scorer(self, scorer_name):
    """Return the bound scoring method registered under `scorer_name`.

    `scorers` is a module-level mapping from public scorer name to the
    attribute name of the corresponding method on this object.
    (The redundant `global scorers` declaration was removed: `global` is
    only needed for assignment, not for read access.)
    """
    return getattr(self, scorers[scorer_name])
############################
# Web application handlers #
############################
class ConceptExtractorHandler(RequestHandler):
    """Web application handler for the /extract endpoint.

    Parses and sanitizes the request's query parameters, then hands the
    extraction job to a pool of worker processes (one pipe + semaphore per
    worker) via a background thread, so the Tornado IO loop is never blocked.
    """
    def initialize(self, pipes):
        # pipes: list of (multiprocessing.Pipe end, threading.Semaphore)
        # pairs shared by all requests; the semaphore guards exclusive
        # access to its pipe.
        self.pipes = pipes

    @asynchronous
    def get(self):
        # Read raw (string) query parameters with their defaults.
        text = self.get_argument('data', default=None)
        title = self.get_argument('title', default='')
        boost_method = self.get_argument('boost_method', default='1')
        n = self.get_argument('n', default='10')
        k = self.get_argument('k', default='25')
        n_ranked = self.get_argument('n_ranked', default='25')
        rank_sim = self.get_argument('rank_sim', default='spearman_rank_similarity')
        return_values_str = self.get_argument('return_values', default='concepts,terms,phrases')
        return_values = [value.strip() for value in return_values_str.split(',')]
        # JSONP callback name, if the client requested one.
        callback = self.get_argument('callback', default=None)
        # Sanitize boost_method: non-integers fall back to 1.
        try:
            boost_method = int(boost_method)
        except ValueError:
            boost_method = 1
        # Map the numeric boost_method onto the two boolean flags:
        # 1 -> boost only; 2 -> boost and amplify lower scores; else neither.
        if boost_method == 1:
            boost_concept = True
            boost_lower = False
        elif boost_method == 2:
            boost_concept = True
            boost_lower = True
        else:
            boost_concept = False
            boost_lower = False
        # Sanitize n (number of results), clamped to >= 1.
        try:
            n = int(n)
        except ValueError:
            n = 10
        if n < 1:
            n = 1
        # Sanitize k (number of concepts), clamped to >= 1.
        try:
            k = int(k)
        except ValueError:
            k = 25
        if k < 1:
            k = 1
        # Sanitize n_ranked (top words per concept for reranking), >= 0.
        try:
            n_ranked = int(n_ranked)
        except ValueError:
            n_ranked = 25
        if n_ranked < 0:
            n_ranked = 0
        # Only two rank-similarity functions are supported.
        if rank_sim not in {'gk_rank_similarity', 'spearman_rank_similarity'}:
            rank_sim = 'spearman_rank_similarity'
        params = {'data': text,
                  'title': title,
                  'with_score': True,
                  'n': n,
                  'k': k,
                  'n_ranked': n_ranked,
                  'return_values': return_values,
                  'boost_concept': boost_concept,
                  'boost_lower': boost_lower,
                  'rank_sim': rank_sim
                  }
        # Run the blocking pipe round-trip off the IO loop thread.
        threading.Thread(target=self._get_thread, args=(params, callback)).start()

    def _on_finish(self, result, callback):
        """Finisher function for asynchronous get method.

        Writes `result` (a JSON string) as the response, wrapped in a JSONP
        callback when one was requested, then finishes the request.
        Must run on the IO loop thread (scheduled via add_callback).
        """
        self.set_header("Content-Type", "application/json")
        if callback:
            self.write('%s(%s)' % (callback, result))
        else:
            self.write(result)
        self.flush()
        self.finish()

    def _get_thread(self, params, callback):
        # Background worker for one request: claim a free worker pipe,
        # send the parameters, wait for the result, then schedule the
        # response on the IO loop.
        result = ''
        try:
            if params['data'] is not None:
                available_pipe = None
                semaphore = None
                # Busy-wait for a free worker; sleep scales inversely with
                # pool size so the poll rate stays roughly constant.
                while not available_pipe:
                    for pipe, sem in self.pipes:
                        if sem.acquire(False):
                            available_pipe = pipe
                            semaphore = sem
                            break
                    if not available_pipe:
                        time.sleep(2.0 / len(self.pipes))
                start_time = time.time()
                try:
                    available_pipe.send(params)
                    extraction_output = available_pipe.recv()
                finally:
                    LOGGER.info('Extraction done in %.3fs' % (time.time() - start_time))
                    # Always release the worker, even if send/recv failed.
                    semaphore.release()
                result = json.dumps(extraction_output)
        finally:
            # add_callback is the only thread-safe way to touch the request
            # from outside the IO loop thread.
            IOLoop.instance().add_callback(lambda: self._on_finish(result, callback))

    @asynchronous
    def post(self):
        # POST behaves identically to GET.
        return self.get()
class VersionHandler(RequestHandler):
    """Serve the module-level VERSION string on both GET and POST."""

    def get(self):
        self.write(VERSION)

    def post(self):
        # POST is a plain alias for GET.
        self.get()
class MainHandler(RequestHandler):
    """Permanently redirect the root URL to the configured index page."""

    def initialize(self, index_file):
        # Path of the static HTML page serving as the UI entry point.
        self.index_file = index_file

    def get(self):
        self.redirect(self.index_file, permanent=True)
def main():
    """Command-line entry point: build the extractor and dispatch on mode.

    Modes: a test directory triggers batch extraction; start == 1 runs the
    web server; start == 2 runs an interactive console loop.
    """
    sys.stdout = Unbuffered(sys.stdout)
    parsed = dict(**vars(parse_arguments()))
    # `start` and `n_jobs` are not extractor options; pop them out before
    # forwarding everything else to the constructor.
    start = parsed.pop('start')
    n_jobs = parsed.pop('n_jobs')
    extractor = KeyTermsExtractor(**parsed)
    extractor.initialize()
    if extractor.testdir:
        # Bug fix: `parsed` is a dict, so the old attribute access
        # (parsed.testdir / parsed.testdir_arff) raised AttributeError.
        extractor.extract_from_directory(parsed['testdir'], with_score=False,
                                         arff_output=parsed['testdir_arff'])
    elif start == 1:
        extractor.start_server(n_jobs=n_jobs)
    elif start == 2:
        from pprint import pprint
        # Interactive loop (Python 2: raw_input); exit with Ctrl-C/Ctrl-D.
        while True:
            text = raw_input('Text: ')
            pprint(extractor.extract(text, with_score=True, n=20))


if __name__ == '__main__':
    main()
| apache-2.0 |
okadate/romspy | romspy/tplot/tplot_station.py | 1 | 2385 | # coding: utf-8
# (c) 2015-11-28 Teruhisa Okada
import netCDF4
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import numpy as np
import pandas as pd
import romspy
def resample(date, var, **kw):
    """Resample surface/bottom series onto hourly/daily/monthly bins.

    **Parameters**

    date : array-like of datetimes, used as the DataFrame index
    var : 2-D array; column -1 is taken as the surface, column 0 as the bottom
    resample : {'H', 'D', 'M'}, optional keyword, defaults to 'D'

    **Returns**

    (index values, surface values, bottom values)
    """
    rule = kw.pop('resample', 'D')
    # Label offsets that center each bin label (e.g. daily means at 12:00).
    offsets = {'H': '-30min', 'D': '-12H', 'M': '-15D'}
    try:
        loffset = offsets[rule]
    except KeyError:
        # Bug fix: an unsupported rule used to fall through the if/elif
        # chain and raise a confusing NameError on `loffset`.
        raise ValueError("resample rule must be one of 'H', 'D' or 'M', got %r" % (rule,))
    df = pd.DataFrame({'sur': var[:, -1], 'bot': var[:, 0]}, index=date)
    # NOTE(review): `loffset` was removed from DataFrame.resample in
    # pandas 2.0; this module targets the older pandas API.
    df = df.resample(rule, loffset=loffset)
    return df.index.values, df.sur.values, df.bot.values
def tplot_station_main(stafile, vname, station, dates, **kw):
    """Plot surface and bottom time series of one variable at one station.

    **Parameters**

    stafile : path to a ROMS stations NetCDF file
    vname : variable name in the file
    station : 1-based station number (indexed as station-1 in the file)
    dates : sequence; dates[0]/dates[-1] set the x-axis limits
    ax, date_format, cff, resample : optional keywords
        cff is a unit-conversion factor applied to the raw values.
    """
    # Python 2 print statement; this module targets Python 2.
    print stafile, vname, station, dates
    ax = kw.pop('ax', None)
    date_format = kw.pop('date_format', '%Y-%m')
    cff = kw.pop('cff', 1.0)
    #ntime = kw.pop('ntime', 8785)
    if ax is None:
        ax = plt.gca()
    nc = netCDF4.Dataset(stafile, 'r')
    time = nc.variables['ocean_time']
    # Replace the stored times by an evenly spaced vector over the same span.
    time = np.linspace(time[0], time[-1], len(time))
    date = netCDF4.num2date(time, romspy.JST)
    # Levels [0, 19]: presumably bottom and surface s-levels of a 20-level
    # grid — TODO confirm against the model configuration.
    var = nc.variables[vname][:, station - 1, [0, 19]] * cff
    date, sur, bot = resample(date, var, **kw)
    ax.plot(date, sur, '-', lw=1.5, color='#4D71AF', label='surface')
    ax.plot(date, bot, '-', lw=1.5, color='#C34F53', label='bottom')
    ax.legend(loc='best')
    ax.set_title('Sta.{}'.format(station))
    ax.set_ylabel(vname)
    ax.set_xlim(dates[0], dates[-1])
    ax.xaxis.set_major_formatter(DateFormatter(date_format))
def tplot_station(stafile, vname, station, dates, **kw):
    """Plot a station time series, converting units based on the variable name.

    A molar-to-gram conversion factor is chosen from the variable name and
    forwarded to tplot_station_main as `cff`.
    """
    # Ordered substring -> factor table: 'N' takes precedence over 'P',
    # and both over 'oxygen'; anything else is passed through unscaled.
    conversions = (
        ('N', romspy.mol2g_N),
        ('P', romspy.mol2g_P),
        #('plankton', romspy.mol2g_N),
        ('oxygen', romspy.mol2g_O2),
    )
    factor = next((f for key, f in conversions if key in vname), 1.0)
    tplot_station_main(stafile, vname, station, dates, cff=factor, **kw)
if __name__ == '__main__':
    # Demo: plot 2012 phytoplankton time series for six Osaka Bay stations.
    import seaborn as sns
    import datetime
    stafile = '/home/okada/ism-i/apps/OB500P/case25/NL2/ob500_sta.nc'
    vname = 'phytoplankton'
    stations = [3, 4, 5, 6, 12, 13]
    dates = [datetime.datetime(2012, 1, 1, 0), datetime.datetime(2013, 1, 1, 0)]
    # One subplot per station, stacked vertically.
    fig, axes = plt.subplots(6, 1, figsize=[10, 15])
    plt.subplots_adjust(hspace=0.4)
    for station, ax in zip(stations, axes):
        tplot_station(stafile, vname, station, dates, ax=ax, date_format='%m/%d')
        ax.set_ylim(-1, 1)
    plt.show()
| mit |
python-visualization/folium | tests/plugins/test_fast_marker_cluster.py | 2 | 2514 | # -*- coding: utf-8 -*-
"""
Test FastMarkerCluster
----------------------
"""
import folium
from folium.plugins import FastMarkerCluster
from folium.utilities import normalize
from jinja2 import Template
import numpy as np
import pandas as pd
import pytest
def test_fast_marker_cluster():
    """Render a map with a FastMarkerCluster of 100 random points and check
    that the required JS/CSS assets and the cluster script are emitted."""
    n = 100
    # Fixed seed so the rendered output is deterministic.
    np.random.seed(seed=26082009)
    data = np.array([
        np.random.uniform(low=35, high=60, size=n),
        np.random.uniform(low=-12, high=30, size=n),
    ]).T
    m = folium.Map([45., 3.], zoom_start=4)
    mc = FastMarkerCluster(data).add_to(m)
    out = normalize(m._parent.render())

    # We verify that imports
    assert '<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.1.0/leaflet.markercluster.js"></script>' in out  # noqa
    assert '<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.1.0/MarkerCluster.css"/>' in out  # noqa
    assert '<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.1.0/MarkerCluster.Default.css"/>' in out  # noqa

    # Verify the script part is okay: the expected JS is rebuilt from the
    # same template the plugin uses and compared after normalization.
    tmpl = Template("""
        var {{ this.get_name() }} = (function(){
            {{ this.callback }}

            var data = {{ this.data|tojson }};
            var cluster = L.markerClusterGroup({{ this.options|tojson }});
            {%- if this.icon_create_function is not none %}
            cluster.options.iconCreateFunction =
                {{ this.icon_create_function.strip() }};
            {%- endif %}

            for (var i = 0; i < data.length; i++) {
                var row = data[i];
                var marker = callback(row);
                marker.addTo(cluster);
            }

            cluster.addTo({{ this._parent.get_name() }});
            return cluster;
        })();
    """)
    expected = normalize(tmpl.render(this=mc))
    assert expected in out
@pytest.mark.parametrize('case', [
    np.array([[0, 5, 1], [1, 6, 1], [2, 7, 0.5]]),
    [[0, 5, 'red'], (1, 6, 'blue'), [2, 7, {'this': 'also works'}]],
    pd.DataFrame([[0, 5, 'red'], [1, 6, 'blue'], [2, 7, 'something']],
                 columns=['lat', 'lng', 'color']),
])
def test_fast_marker_cluster_data(case):
    """FastMarkerCluster must normalize ndarray / mixed-sequence / DataFrame
    input into a list of 3-element lists with numeric lat/lng columns."""
    data = FastMarkerCluster(case).data
    assert isinstance(data, list)
    assert len(data) == 3
    # Idiom fix: iterate rows directly with enumerate instead of
    # `for i in range(len(data))` plus repeated indexing.
    for i, row in enumerate(data):
        assert isinstance(row, list)
        assert len(row) == 3
        assert row[0] == float(i)
        assert row[1] == float(i + 5)
| mit |
sasdelli/lc_predictor | lc_predictor/savgol.py | 1 | 3104 | import numpy as np
# This is Thomas Haslwanter's implementation at:
# http://wiki.scipy.org/Cookbook/SavitzkyGolay
def savitzky_golay(y, window_size, order, deriv=0):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and features of
    the signal better than other types of filtering approaches, such as
    moving averages techniques: for each point, a least-squares polynomial
    of the given order is fitted over an odd-sized window centered at the
    point.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only
        smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    try:
        # Fixes: builtins abs/int replace np.abs(np.int(...)) — np.int was
        # removed in NumPy 1.24 — and the Python-2-only
        # `except ValueError, msg` syntax is replaced by the portable form.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients: Vandermonde-style design matrix of the window
    # (np.array replaces the deprecated np.mat; pinv of an ndarray already
    # returns an ndarray, so the old `.A` matrix conversion is unnecessary)
    b = np.array([[k ** i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv]
    # pad the signal at the extremes with
    # values taken from the signal itself (mirrored about the end points)
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m, y, mode='valid')
| gpl-3.0 |
Clyde-fare/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.
    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.
    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.
    classes : array-like of ints (1d or 0d)
        The number of classes in the input.
    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.
    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Promote scalars so the cartesian product below always works.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)
    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Two independent random labelings act as y_true / y_pred.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                # `.flat[i]` maps the flat product index i onto the
                # (samples, classes, density) trailing dimensions.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot one timing curve per (metric, format) pair against the variable
    named by x_label; color encodes the metric, marker encodes the format.
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for metric_idx, metric in enumerate(metrics):
        color = metric_colors[metric_idx % len(metric_colors)]
        for format_idx, fmt in enumerate(formats):
            ax.plot(x_ticks, results[metric_idx, format_idx].flat,
                    label='{}, {}'.format(metric, fmt),
                    marker=format_markers[format_idx],
                    color=color)
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line driver: choose metrics/formats/sizes, run the benchmark,
    # print a table, and optionally plot timings against one swept parameter.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()

    if args.plot is not None:
        # Replace the scalar swept parameter by a vector of steps; integer
        # parameters are rounded and deduplicated.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)

    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)

    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)

    _tabulate(results, args.metrics, args.formats)

    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        # Title lists the parameters held fixed (all except the swept one).
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
anurag313/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# NOTE(review): scipy.misc.lena was removed in SciPy 1.0; this example
# targets an older SciPy.

# Downsample the image by a factor of 4 (each statement halves both axes by
# summing 2x2 pixel blocks).
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11

###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)

    plt.figure(figsize=(5, 5))
    plt.imshow(lena, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): `contours=1` is not a standard contour() kwarg
        # (likely `levels` was intended) and plt.cm.spectral was removed in
        # matplotlib 2.2 — confirm against the targeted matplotlib version.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
    plt.xticks(())
    plt.yticks(())
    plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/backends/backend_gtk3agg.py | 6 | 3144 | import cairo
import numpy as np
import sys
import warnings
import backend_agg
import backend_gtk3
from matplotlib.figure import Figure
from matplotlib import transforms
if sys.version_info[0] >= 3:
warnings.warn("The Gtk3Agg backend is not known to work on Python 3.x.")
class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
                          backend_agg.FigureCanvasAgg):
    """GTK3 canvas that renders with the Agg raster renderer and paints the
    resulting pixel buffer onto the widget through cairo."""

    def __init__(self, figure):
        backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
        # Bounding boxes queued by blit() for partial repaints; consumed
        # (and cleared) by on_draw_event.
        self._bbox_queue = []

    def _renderer_init(self):
        # The Agg renderer is managed by FigureCanvasAgg; nothing to set up.
        pass

    def _render_figure(self, width, height):
        # NOTE(review): width/height are accepted but unused here — Agg
        # draws at the figure's own size.
        backend_agg.FigureCanvasAgg.draw(self)

    def on_draw_event(self, widget, ctx):
        """ GtkDrawable draw event, like expose_event in GTK 2.X
        """
        allocation = self.get_allocation()
        w, h = allocation.width, allocation.height

        if not len(self._bbox_queue):
            # No queued blit regions: do a full redraw if one is pending,
            # otherwise there is nothing to paint.
            if self._need_redraw:
                self._render_figure(w, h)
                bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
            else:
                return
        else:
            bbox_queue = self._bbox_queue

        for bbox in bbox_queue:
            # Copy the rendered ARGB pixels for this region out of the Agg
            # buffer and wrap them in a cairo surface.
            area = self.copy_from_bbox(bbox)
            buf = np.fromstring(area.to_string_argb(), dtype='uint8')

            x = int(bbox.x0)
            # Flip y: matplotlib's origin is bottom-left, cairo's top-left.
            y = h - int(bbox.y1)
            width = int(bbox.x1) - int(bbox.x0)
            height = int(bbox.y1) - int(bbox.y0)

            image = cairo.ImageSurface.create_for_data(
                buf, cairo.FORMAT_ARGB32, width, height)
            ctx.set_source_surface(image, x, y)
            ctx.paint()

        if len(self._bbox_queue):
            self._bbox_queue = []

        return False

    def blit(self, bbox=None):
        # If bbox is None, blit the entire canvas to gtk. Otherwise
        # blit only the area defined by the bbox.
        if bbox is None:
            bbox = self.figure.bbox

        allocation = self.get_allocation()
        w, h = allocation.width, allocation.height
        x = int(bbox.x0)
        y = h - int(bbox.y1)
        width = int(bbox.x1) - int(bbox.x0)
        height = int(bbox.y1) - int(bbox.y0)

        # Queue the region and ask GTK to repaint just that rectangle;
        # on_draw_event will consume the queued bbox.
        self._bbox_queue.append(bbox)
        self.queue_draw_area(x, y, width, height)

    def print_png(self, filename, *args, **kwargs):
        # Do this so we can save the resolution of figure in the PNG file
        agg = self.switch_backends(backend_agg.FigureCanvasAgg)
        return agg.print_png(filename, *args, **kwargs)
class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
    # Agg-specific manager; all behavior is inherited from the GTK3 base.
    pass
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    # The caller may supply a custom Figure subclass via FigureClass.
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    # Wrap the figure in an Agg-backed GTK3 canvas, then in its manager.
    return FigureManagerGTK3Agg(FigureCanvasGTK3Agg(figure), num)
FigureManager = FigureManagerGTK3Agg
show = backend_gtk3.show
| mit |
kashif/scikit-learn | examples/calibration/plot_compare_calibration.py | 82 | 5012 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
# Synthetic binary problem; 2 informative + 2 redundant of 20 features.
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)


###############################################################################
# Plot calibration plots

plt.figure(figsize=(10, 10))
# Top 2/3: reliability curves; bottom 1/3: predicted-probability histograms.
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))

ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        prob_pos = clf.decision_function(X_test)
        # Min-max scale the unbounded decision values into [0, 1] so they
        # can be binned like probabilities.
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)

    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))

    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)

ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')

ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)

plt.tight_layout()
plt.show()
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/indexes/category.py | 7 | 22750 | import numpy as np
import pandas.index as _index
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.types.generic import ABCCategorical, ABCSeries
from pandas.types.common import (is_categorical_dtype,
_ensure_platform_int,
is_list_like,
is_scalar)
from pandas.types.missing import array_equivalent
from pandas.util.decorators import (Appender, cache_readonly,
deprecate_kwarg)
from pandas.core.config import get_option
from pandas.indexes.base import Index, _index_shared_docs
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.indexes.base as ibase
class CategoricalIndex(Index, base.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
.. versionadded:: 0.16.1
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
"""
_typ = 'categoricalindex'
_engine_type = _index.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
            copy=False, name=None, fastpath=False, **kwargs):
    # Construct a CategoricalIndex, coercing `data` into a Categorical.
    # NOTE(review): the `dtype` parameter is accepted but never used here.

    # fastpath: `data` is already a suitable Categorical; skip validation.
    if fastpath:
        return cls._simple_new(data, name=name)

    # Inherit the name from a named input (Series/Index) when not given.
    if name is None and hasattr(data, 'name'):
        name = data.name

    if isinstance(data, ABCCategorical):
        data = cls._create_categorical(cls, data, categories, ordered)
    elif isinstance(data, CategoricalIndex):
        data = data._data
        data = cls._create_categorical(cls, data, categories, ordered)
    else:

        # don't allow scalars
        # if data is None, then categories must be provided
        if is_scalar(data):
            if data is not None or categories is None:
                cls._scalar_data_error(data)
            # Empty index whose dtype is defined by `categories`.
            data = []
        data = cls._create_categorical(cls, data, categories, ordered)

    if copy:
        data = data.copy()

    return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
                       name=None):
    """
    *this is an internal non-public method*

    create the correct categorical from codes

    Parameters
    ----------
    codes : new codes
    categories : optional categories, defaults to existing
    ordered : optional ordered attribute, defaults to existing
    name : optional name attribute, defaults to existing

    Returns
    -------
    CategoricalIndex
    """
    from pandas.core.categorical import Categorical
    if categories is None:
        categories = self.categories
    if ordered is None:
        ordered = self.ordered
    if name is None:
        name = self.name
    # Bug fix: pass the resolved `ordered` instead of always using
    # `self.ordered`, which silently ignored an explicit `ordered` argument.
    cat = Categorical.from_codes(codes, categories=categories,
                                 ordered=ordered)
    return CategoricalIndex(cat, name=name)
@staticmethod
def _create_categorical(self, data, categories=None, ordered=None):
    """
    *this is an internal non-public method*

    create the correct categorical from data and the properties

    Parameters
    ----------
    data : data for new Categorical
    categories : optional categories, defaults to existing
    ordered : optional ordered attribute, defaults to existing

    Returns
    -------
    Categorical
    """
    if not isinstance(data, ABCCategorical):
        # Non-categorical input: build a fresh Categorical; an unspecified
        # `ordered` defaults to False here (not to any existing value).
        ordered = False if ordered is None else ordered
        from pandas.core.categorical import Categorical
        data = Categorical(data, categories=categories, ordered=ordered)
    else:
        # Already categorical: only override the explicitly given props.
        if categories is not None:
            data = data.set_categories(categories)
        if ordered is not None:
            data = data.set_ordered(ordered)
    return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
**kwargs):
result = object.__new__(cls)
values = cls._create_categorical(cls, values, categories, ordered)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
    @Appender(_index_shared_docs['_shallow_copy'])
    def _shallow_copy(self, values=None, categories=None, ordered=None,
                      **kwargs):
        # categories and ordered can't be part of attributes,
        # as these are properties
        if categories is None:
            categories = self.categories
        if ordered is None:
            ordered = self.ordered
        return super(CategoricalIndex,
                     self)._shallow_copy(values=values, categories=categories,
                                         ordered=ordered, **kwargs)
    def _is_dtype_compat(self, other):
        """
        *this is an internal non-public method*
        provide a comparison between the dtype of self and other (coercing if
        needed)
        Raises
        ------
        TypeError if the dtypes are not compatible
        """
        if is_categorical_dtype(other):
            if isinstance(other, CategoricalIndex):
                other = other._values
            if not other.is_dtype_equal(self):
                raise TypeError("categories must match existing categories "
                                "when appending")
        else:
            # non-categorical input: every value must already be one of our
            # categories for the coercion to be lossless
            values = other
            if not is_list_like(values):
                values = [values]
            other = CategoricalIndex(self._create_categorical(
                self, other, categories=self.categories, ordered=self.ordered))
            if not other.isin(values).all():
                raise TypeError("cannot append a non-category item to a "
                                "CategoricalIndex")
        return other
    def equals(self, other):
        """
        Determines if two CategoricalIndex objects contain the same elements.
        """
        if self.is_(other):
            return True
        if not isinstance(other, Index):
            return False
        try:
            # a failed coercion means the indexes cannot be equal
            other = self._is_dtype_compat(other)
            return array_equivalent(self._data, other)
        except (TypeError, ValueError):
            pass
        return False
    @property
    def _formatter_func(self):
        # delegate element formatting to the categories index
        return self.categories._formatter_func
    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value)
        """
        # 0 means "unlimited"; show at most 10 categories in that case
        max_categories = (10 if get_option("display.max_categories") == 0 else
                          get_option("display.max_categories"))
        attrs = [
            ('categories',
             ibase.default_pprint(self.categories,
                                  max_seq_items=max_categories)),
            ('ordered', self.ordered)]
        if self.name is not None:
            attrs.append(('name', ibase.default_pprint(self.name)))
        attrs.append(('dtype', "'%s'" % self.dtype))
        max_seq_items = get_option('display.max_seq_items') or len(self)
        if len(self) > max_seq_items:
            attrs.append(('length', len(self)))
        return attrs
    @property
    def inferred_type(self):
        return 'categorical'
    @property
    def values(self):
        """ return the underlying data, which is a Categorical """
        return self._data
    def get_values(self):
        """ return the underlying data as an ndarray """
        return self._data.get_values()
    @property
    def codes(self):
        # integer codes into ``categories``; -1 marks a missing value
        return self._data.codes
    @property
    def categories(self):
        return self._data.categories
    @property
    def ordered(self):
        return self._data.ordered
    def __contains__(self, key):
        hash(key)  # unhashable keys can never be contained; fail fast
        return key in self.values
    def __array__(self, dtype=None):
        """ the array interface, return my values """
        return np.array(self._data, dtype=dtype)
    @cache_readonly
    def _isnan(self):
        """ return if each value is nan"""
        return self._data.codes == -1
    @Appender(ibase._index_shared_docs['fillna'])
    def fillna(self, value, downcast=None):
        # value must be a valid category; _assert_can_do_op raises otherwise
        self._assert_can_do_op(value)
        return CategoricalIndex(self._data.fillna(value), name=self.name)
    def argsort(self, *args, **kwargs):
        return self.values.argsort(*args, **kwargs)
    @cache_readonly
    def _engine(self):
        # we are going to look things up with the codes themselves
        return self._engine_type(lambda: self.codes.astype('i8'), len(self))
    @cache_readonly
    def is_unique(self):
        return not self.duplicated().any()
    @Appender(base._shared_docs['unique'] % ibase._index_doc_kwargs)
    def unique(self):
        result = base.IndexOpsMixin.unique(self)
        # CategoricalIndex._shallow_copy keeps the original categories
        # and ordered if not otherwise specified
        return self._shallow_copy(result, categories=result.categories,
                                  ordered=result.ordered)
    @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
                                                   False: 'first'})
    @Appender(base._shared_docs['duplicated'] % ibase._index_doc_kwargs)
    def duplicated(self, keep='first'):
        from pandas.hashtable import duplicated_int64
        # duplicate detection runs on the int64 codes, not the values
        codes = self.codes.astype('i8')
        return duplicated_int64(codes, keep)
    def _to_safe_for_reshape(self):
        """ convert to object if we are a categorical """
        return self.astype('object')
    def get_loc(self, key, method=None):
        """
        Get integer location for requested label
        Parameters
        ----------
        key : label
        method : {None}
            * default: exact matches only.
        Returns
        -------
        loc : int if unique index, possibly slice or mask if not
        """
        # categories.get_loc raises KeyError itself for most misses;
        # the -1 check below is defensive
        codes = self.categories.get_loc(key)
        if (codes == -1):
            raise KeyError(key)
        return self._engine.get_loc(codes)
    def _can_reindex(self, indexer):
        """ always allow reindexing """
        pass
    def where(self, cond, other=None):
        """
        .. versionadded:: 0.19.0
        Return an Index of same shape as self and whose corresponding
        entries are from self where cond is True and otherwise are from
        other.
        Parameters
        ----------
        cond : boolean same length as self
        other : scalar, or array-like
        """
        if other is None:
            other = self._na_value
        values = np.where(cond, self.values, other)
        # rebuild a Categorical with the original dtype properties
        from pandas.core.categorical import Categorical
        cat = Categorical(values,
                          categories=self.categories,
                          ordered=self.ordered)
        return self._shallow_copy(cat, **self._get_attributes_dict())
    def reindex(self, target, method=None, level=None, limit=None,
                tolerance=None):
        """
        Create index with target's values (move/add/delete values as necessary)
        Returns
        -------
        new_index : pd.Index
            Resulting index
        indexer : np.ndarray or None
            Indices of output values in original index
        """
        if method is not None:
            raise NotImplementedError("argument method is not implemented for "
                                      "CategoricalIndex.reindex")
        if level is not None:
            raise NotImplementedError("argument level is not implemented for "
                                      "CategoricalIndex.reindex")
        if limit is not None:
            raise NotImplementedError("argument limit is not implemented for "
                                      "CategoricalIndex.reindex")
        target = ibase._ensure_index(target)
        if not is_categorical_dtype(target) and not target.is_unique:
            raise ValueError("cannot reindex with a non-unique indexer")
        indexer, missing = self.get_indexer_non_unique(np.array(target))
        new_target = self.take(indexer)
        # filling in missing if needed
        if len(missing):
            cats = self.categories.get_indexer(target)
            if (cats == -1).any():
                # some targets are not valid categories:
                # coerce to a regular index here!
                result = Index(np.array(self), name=self.name)
                new_target, indexer, _ = result._reindex_non_unique(
                    np.array(target))
            else:
                # all targets are valid categories; patch the missing slots
                # with their category codes
                codes = new_target.codes.copy()
                codes[indexer == -1] = cats[missing]
                new_target = self._create_from_codes(codes)
        # we always want to return an Index type here
        # to be consistent with .reindex for other index types (e.g. they don't
        # coerce based on the actual values, only on the dtype)
        # unless we had an initial Categorical to begin with
        # in which case we are going to conform to the passed Categorical
        new_target = np.asarray(new_target)
        if is_categorical_dtype(target):
            new_target = target._shallow_copy(new_target, name=self.name)
        else:
            new_target = Index(new_target, name=self.name)
        return new_target, indexer
    def _reindex_non_unique(self, target):
        """ reindex from a non-unique; which CategoricalIndex's are almost
        always
        """
        new_target, indexer = self.reindex(target)
        new_indexer = None
        check = indexer == -1
        if check.any():
            # mark positions that could not be matched
            new_indexer = np.arange(len(self.take(indexer)))
            new_indexer[check] = -1
        cats = self.categories.get_indexer(target)
        if not (cats == -1).any():
            # .reindex returns normal Index. Revert to CategoricalIndex if
            # all targets are included in my categories
            new_target = self._shallow_copy(new_target)
        return new_target, indexer, new_indexer
    def get_indexer(self, target, method=None, limit=None, tolerance=None):
        """
        Compute indexer and mask for new index given the current index. The
        indexer should be then used as an input to ndarray.take to align the
        current data to the new index. The mask determines whether labels are
        found or not in the current index
        Parameters
        ----------
        target : MultiIndex or Index (of tuples)
        method : {'pad', 'ffill', 'backfill', 'bfill'}
            pad / ffill: propagate LAST valid observation forward to next valid
            backfill / bfill: use NEXT valid observation to fill gap
        Notes
        -----
        This is a low-level method and probably should be used at your own risk
        Examples
        --------
        >>> indexer, mask = index.get_indexer(new_index)
        >>> new_values = cur_values.take(indexer)
        >>> new_values[-mask] = np.nan
        Returns
        -------
        (indexer, mask) : (ndarray, ndarray)
        """
        method = missing.clean_reindex_fill_method(method)
        target = ibase._ensure_index(target)
        if isinstance(target, CategoricalIndex):
            target = target.categories
        # fill methods are not supported for categorical indexes
        if method == 'pad' or method == 'backfill':
            raise NotImplementedError("method='pad' and method='backfill' not "
                                      "implemented yet for CategoricalIndex")
        elif method == 'nearest':
            raise NotImplementedError("method='nearest' not implemented yet "
                                      'for CategoricalIndex')
        else:
            # map target values to category codes, then look those codes up
            codes = self.categories.get_indexer(target)
            indexer, _ = self._engine.get_indexer_non_unique(codes)
        return _ensure_platform_int(indexer)
    def get_indexer_non_unique(self, target):
        """ this is the same for a CategoricalIndex for get_indexer; the API
        returns the missing values as well
        """
        target = ibase._ensure_index(target)
        if isinstance(target, CategoricalIndex):
            target = target.categories
        codes = self.categories.get_indexer(target)
        return self._engine.get_indexer_non_unique(codes)
    def _convert_list_indexer(self, keyarr, kind=None):
        """
        we are passed a list indexer.
        Return our indexer or raise if all of the values are not included in
        the categories
        """
        codes = self.categories.get_indexer(keyarr)
        if (codes == -1).any():
            raise KeyError("a list-indexer must only include values that are "
                           "in the categories")
        # returning None defers to the default list-indexing machinery
        return None
    @Appender(_index_shared_docs['take'])
    def take(self, indices, axis=0, allow_fill=True,
             fill_value=None, **kwargs):
        nv.validate_take(tuple(), kwargs)
        indices = _ensure_platform_int(indices)
        # take on the codes; missing positions are filled with code -1 (NaN)
        taken = self._assert_take_fillable(self.codes, indices,
                                           allow_fill=allow_fill,
                                           fill_value=fill_value,
                                           na_value=-1)
        return self._create_from_codes(taken)
    def map(self, mapper):
        """
        Apply mapper function to its categories (not codes).
        Parameters
        ----------
        mapper : callable
            Function to be applied. When all categories are mapped
            to different categories, the result will be Categorical which has
            the same order property as the original. Otherwise, the result will
            be np.ndarray.
        Returns
        -------
        applied : Categorical or np.ndarray.
        """
        return self.values.map(mapper)
    def delete(self, loc):
        """
        Make new Index with passed location(-s) deleted
        Returns
        -------
        new_index : Index
        """
        return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
ValueError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
    def _append_same_dtype(self, to_concat, name):
        """
        Concatenate to_concat which has the same class
        Raises TypeError if any piece is not compatible with the categories
        """
        # coerce/validate each piece first; incompatible pieces raise
        to_concat = [self._is_dtype_compat(c) for c in to_concat]
        codes = np.concatenate([c.codes for c in to_concat])
        result = self._create_from_codes(codes, name=name)
        # if name is None, _create_from_codes sets self.name
        result.name = name
        return result
    @classmethod
    def _add_comparison_methods(cls):
        """ add in comparison methods """
        def _make_compare(op):
            def _evaluate_compare(self, other):
                # if we have a Categorical type, then must have the same
                # categories
                if isinstance(other, CategoricalIndex):
                    other = other._values
                elif isinstance(other, Index):
                    other = self._create_categorical(
                        self, other._values, categories=self.categories,
                        ordered=self.ordered)
                if isinstance(other, (ABCCategorical, np.ndarray,
                                      ABCSeries)):
                    if len(self.values) != len(other):
                        raise ValueError("Lengths must match to compare")
                if isinstance(other, ABCCategorical):
                    if not self.values.is_dtype_equal(other):
                        raise TypeError("categorical index comparisions must "
                                        "have the same categories and ordered "
                                        "attributes")
                # delegate the actual comparison to the Categorical values
                return getattr(self.values, op)(other)
            return _evaluate_compare
        cls.__eq__ = _make_compare('__eq__')
        cls.__ne__ = _make_compare('__ne__')
        cls.__lt__ = _make_compare('__lt__')
        cls.__gt__ = _make_compare('__gt__')
        cls.__le__ = _make_compare('__le__')
        cls.__ge__ = _make_compare('__ge__')
    def _delegate_method(self, name, *args, **kwargs):
        """ method delegation to the ._values """
        method = getattr(self._values, name)
        if 'inplace' in kwargs:
            raise ValueError("cannot use inplace with CategoricalIndex")
        res = method(*args, **kwargs)
        # scalar results (e.g. min/max) pass through; array-likes are
        # re-wrapped as a CategoricalIndex with the same name
        if is_scalar(res):
            return res
        return CategoricalIndex(res, name=self.name)
    @classmethod
    def _add_accessors(cls):
        """ add in Categorical accessor methods """
        from pandas.core.categorical import Categorical
        CategoricalIndex._add_delegate_accessors(
            delegate=Categorical, accessors=["rename_categories",
                                             "reorder_categories",
                                             "add_categories",
                                             "remove_categories",
                                             "remove_unused_categories",
                                             "set_categories",
                                             "as_ordered", "as_unordered",
                                             "min", "max"],
            typ='method', overwrite=True)
# wire up disabled numeric/logical ops, comparisons and accessor
# delegation on the class at import time
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
| apache-2.0 |
vaishaksuresh/udacity_data_analyst | P2/ProblemSets_2_to_4/P4_01.py | 1 | 1593 | from pandas import *
from ggplot import *
def plot_weather_data(turnstile_weather):
    '''
    You are passed in a dataframe called turnstile_weather.
    Use turnstile_weather along with ggplot to make a data visualization
    focused on the MTA and weather data we used in assignment #3.
    You should feel free to implement something that we discussed in class
    (e.g., scatterplots, line plots, or histograms) or attempt to implement
    something more advanced if you'd like.
    Here are some suggestions for things to investigate and illustrate:
    * Ridership by time of day or day of week
    * How ridership varies based on Subway station (UNIT)
    * Which stations have more exits or entries at different times of day
    (You can use UNIT as a proxy for subway station.)
    If you'd like to learn more about ggplot and its capabilities, take
    a look at the documentation at:
    https://pypi.python.org/pypi/ggplot/
    You can check out:
    https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
    To see all the columns and data points included in the turnstile_weather
    dataframe.
    However, due to the limitation of our Amazon EC2 server, we are giving you a random
    subset, about 1/3 of the actual data in the turnstile_weather dataframe.
    '''
    # The module only does ``from pandas import *`` which does NOT bind the
    # module name ``pandas`` itself, so the attribute accesses below could
    # raise NameError; import the module explicitly to be safe.
    import pandas
    # silence SettingWithCopyWarning for the in-place column conversion
    pandas.options.mode.chained_assignment = None
    turnstile_weather['DATEn'] = pandas.to_datetime(turnstile_weather['DATEn'])
    # entries over time as a scatter plot with a connecting line
    plot = ggplot(turnstile_weather, aes('DATEn','ENTRIESn_hourly'))+geom_point()+geom_line()
    return plot
| gpl-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Coupled_Contact/Steady_State_Single_Foundation_Sysytem_Under_Compression/CoupledHardContact_NonLinHardSoftShear/n_1/Plot_Results.py | 15 | 3553 | #!/usr/bin/env python
#!/usr/bin/python
import h5py
from matplotlib import pylab
import matplotlib.pylab as plt
import sys
from matplotlib.font_manager import FontProperties
import math
import numpy as np
# NOTE(review): the import block below duplicates the one above
# (copy/paste artifact, incl. a stray shebang); harmless but redundant.
#!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
# global styling: large fonts and thick tick marks for publication figures
plt.rcParams.update({'font.size': 30})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=28
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=28
# Plot the figure. Add labels and titles.
plt.figure()
ax = plt.subplot(111)
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Stress [Pa] ")
# Pore Pressure
# #########################################################################
thefile = "Soil_Foundation_System_Surface_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
# node DOFs from the ESSI output: rows are generalized displacement
# components; indices below select pore pressure / solid / fluid DOFs
upU_p = finput["/Model/Nodes/Generalized_Displacements"][3,:]
upU_u = finput["/Model/Nodes/Generalized_Displacements"][2,:]
upU_U = finput["/Model/Nodes/Generalized_Displacements"][6,:]
u_u = finput["/Model/Nodes/Generalized_Displacements"][79,:]
sigma_zz_ = finput["/Model/Elements/Gauss_Outputs"][14,:]
# pore_pressure
ax.plot(times,upU_p,'b',linewidth=2,label=r'Pore Pressure $p$');
# NOTE(review): Axes.hold() was removed in matplotlib 3.0;
# this script targets matplotlib 2.x or older.
ax.hold(True);
# Total Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
T = times[len(times)-1]
# applied surface load ramps linearly from 0 to 400 Pa over the run
sigma_zz = 400/T*times
# kinetic energy
ax.plot(times,sigma_zz,'k',linewidth=2,label=r'Total Stress $\sigma$');
ax.hold(True);
# Effective Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
# effective stress principle: sigma' = sigma - p
sigma_zz_ = sigma_zz - upU_p
# kinetic energy
ax.plot(times,sigma_zz_,'r',linewidth=2,label=r'''Effective Stress $\sigma^{\prime}$''');
ax.hold(True);
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.35),
          ncol=2, fancybox=True, shadow=True, prop={'size': 24})
pylab.savefig("Coupled_Soft_Contact_Steady_State_SF_Ststem_Under_Compression_Porosity_Effective_Stress_Principle.pdf", bbox_inches='tight')
# plt.show()
#
################################### Drainage Condition Verification #############################
ax.hold(False);
fig = plt.figure();
ax = plt.subplot(111)
# solid vs fluid displacements should coincide under undrained conditions
ax.plot(times,upU_u*1e8,'k',linewidth=3,label=r'$upU\_u$'); ax.hold(True);
ax.plot(times,upU_U*1e8,'b',linewidth=10,label=r'$upU\_U$'); ax.hold(True);
ax.plot(times,u_u*1e8,'r',linewidth=3,label=r'$u\_u$'); ax.hold(True);
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Displacement $\times 1e^{-8}$ [m] ")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.25),
          ncol=4, fancybox=True, shadow=True, prop={'size': 24})
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
pylab.savefig("Coupled_Soft_Contact_Steady_State_SF_Ststem_Under_Compression_Porosity_Undrained_Conditions.pdf", bbox_inches='tight')
# plt.show()
| cc0-1.0 |
vibhorag/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# fit and plot once per weighting scheme: 'uniform' counts every neighbor
# equally, 'distance' weights neighbors by inverse distance
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
plt.show()
| bsd-3-clause |
has2k1/plotnine | plotnine/positions/position_stack.py | 1 | 2953 | from warnings import warn
import numpy as np
import pandas as pd
from ..exceptions import PlotnineWarning
from ..utils import remove_missing
from .position import position
class position_stack(position):
    """
    Stack plotted objects on top of each other
    The objects to stack are those that have
    an overlapping x range.
    """
    # subclasses (e.g. position_fill) set this True to normalize stacks to 1
    fill = False
    def __init__(self, vjust=1, reverse=False):
        self.params = {'vjust': vjust,
                       'reverse': reverse}
    def setup_params(self, data):
        """
        Verify, modify & return a copy of the params.
        """
        # Variable for which to do the stacking
        if 'ymax' in data:
            if any((data['ymin'] != 0) & (data['ymax'] != 0)):
                warn("Stacking not well defined when not "
                     "anchored on the axis.", PlotnineWarning)
            var = 'ymax'
        elif 'y' in data:
            var = 'y'
        else:
            warn("Stacking requires either ymin & ymax or y "
                 "aesthetics. Maybe you want position = 'identity'?",
                 PlotnineWarning)
            var = None
        params = self.params.copy()
        params['var'] = var
        params['fill'] = self.fill
        return params
    def setup_data(self, data, params):
        # var is None when stacking was impossible (warned in setup_params)
        if not params['var']:
            return data
        if params['var'] == 'y':
            data['ymax'] = data['y']
        elif params['var'] == 'ymax':
            # where ymax is 0, fall back to stacking from ymin
            bool_idx = data['ymax'] == 0
            data.loc[bool_idx, 'ymax'] = data.loc[bool_idx, 'ymin']
        data = remove_missing(
            data,
            vars=('x', 'xmin', 'xmax', 'y'),
            name='position_stack')
        return data
    @classmethod
    def compute_panel(cls, data, scales, params):
        if not params['var']:
            return data
        # stack negative and positive bars separately so they extend in
        # opposite directions from the axis
        negative = data['ymax'] < 0
        neg = data.loc[negative]
        pos = data.loc[~negative]
        if len(neg):
            neg = cls.collide(neg, params=params)
        if len(pos):
            pos = cls.collide(pos, params=params)
        data = pd.concat([neg, pos], axis=0, ignore_index=True, sort=True)
        return data
    @staticmethod
    def strategy(data, params):
        """
        Stack overlapping intervals.
        Assumes that each set has the same horizontal position
        """
        vjust = params['vjust']
        y = data['y'].copy()
        # treat missing heights as zero so they do not break the cumsum
        y[np.isnan(y)] = 0
        heights = np.append(0, y.cumsum())
        if params['fill']:
            heights = heights / np.abs(heights[-1])
        data['ymin'] = np.min([heights[:-1], heights[1:]], axis=0)
        data['ymax'] = np.max([heights[:-1], heights[1:]], axis=0)
        # less intuitive than (ymin + vjust(ymax-ymin)), but
        # this way avoids subtracting numbers of potentially
        # similar precision
        data['y'] = ((1-vjust)*data['ymin'] + vjust*data['ymax'])
        return data
| gpl-2.0 |
FYP-DES5/deepscan-core | plot.py | 1 | 6984 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cv2, sys
class EdgeImprover:
    """Refines voxel samples along the boundary of a 2D voxel occupancy map.

    NOTE(review): this is Python 2 code — ``map()`` results are indexed
    directly and the caller uses ``sys.maxint``.
    """
    def __init__(self, voxels, gridsize, minX, minY, maxX, maxY):
        # voxels: dict mapping (i, j) grid coords -> list of 5-vectors
        # (x, y, z, u, v); gridsize: voxel edge length in world units
        self.voxels = voxels
        self.gridsize = gridsize
        # occupancy mask with a 1-voxel empty border on every side
        self.mask = np.zeros((3 + maxX - minX, 3 + maxY - minY), dtype=np.uint8)
        self.offset = (1 - minX, 1 - minY)
        for k in self.voxels.keys():
            self.mask[tuple(np.array(k) + self.offset)] = 255
        # boundary voxels = mask minus its morphological erosion
        self.edge = self.mask - cv2.erode(self.mask, cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)))
        self.toBeDeleted = []
    def run(self):
        # pass 1: histogram point counts of interior (non-edge) voxels to
        # find the typical ("mode") number of samples per full voxel
        it = np.nditer(self.edge, flags=['multi_index'])
        frequencies = {}
        while not it.finished:
            if it[0] != 0:
                it.iternext()
                continue
            point = tuple(np.array(it.multi_index) - self.offset)
            if point not in self.voxels:
                it.iternext()
                continue
            frequency = len(self.voxels[point])
            frequencies[frequency] = 1 + frequencies.get(frequency, 0)
            it.iternext()
        modeGridPoints = max(frequencies, key=lambda k: frequencies[k])
        # pass 2: for each edge voxel, fit local planes and replace its
        # samples with a single representative point placed according to
        # how "full" the voxel is relative to the mode
        it = np.nditer(self.edge, flags=['multi_index'])
        while not it.finished:
            if it[0] == 0:
                it.iternext()
                continue
            point = tuple(np.array(it.multi_index) - self.offset)
            points = self.__getNeighborhoodPoints(self.voxels, *point)
            centroid, ns = self.__fitPointsToPlanes(points)
            center = np.array(point, dtype=np.float64) * self.gridsize
            targetAreaRatio = len(self.voxels[point]) / float(modeGridPoints)
            xy = self.__calculateBestSample(center, centroid, self.gridsize, targetAreaRatio)
            new = [self.__genVFromXYNNN(xy[0] - centroid[0], xy[1] - centroid[1], ns) + centroid]
            self.voxels[(point, 'calibrated')] = new
            self.toBeDeleted.append(point)
            it.iternext()
        # remove the raw edge voxels that were replaced above
        for x in self.toBeDeleted:
            del self.voxels[x]
    @staticmethod
    def __getNeighborhoodPoints(voxels, x, y):
        # gather all samples from the 3x3 voxel neighborhood around (x, y)
        return sum([[] if (i, j) not in voxels else voxels[(i, j)]
                    for i in range(x - 1, x + 2)
                    for j in range(y - 1, y + 2)],
                   [])
    @staticmethod
    def __genVFromXYNNN(x, y, ns):
        # build a 5-vector (x, y, z, u, v) whose last three components lie
        # on the fitted planes with normals ns (evaluated at offset (x, y))
        v = np.array([x, y, 0, 0, 0])
        for i in range(2, 5):
            n = ns[i - 2]
            v[i] = -np.dot(v[[0, 1]], n[[0, 1]]) / n[2]
        return v
    @staticmethod
    def __fitPointsToPlanes(points):
        # least-squares fit of three planes (for z, u, v as functions of
        # x, y) through the point cloud, about its centroid
        if type(points) is not np.ndarray:
            points = np.array(points)
        centroid = np.average(points, axis=0)
        pointsRelativeToCentroid = points - centroid
        # 5x5 scatter (covariance-like) matrix of the centered points
        timesTable = np.dot(pointsRelativeToCentroid.T, pointsRelativeToCentroid)
        def getNormal(n):
            # solve the 2x2 normal equations via Cramer's rule
            D = np.linalg.det(timesTable[0:2, 0:2])
            a = np.linalg.det(timesTable[0:2, (1,n)]) / D
            b = -np.linalg.det(timesTable[(0,n), 0:2]) / D
            return np.array([a, b, 1])
        return centroid, map(getNormal, range(2, 5))
    @staticmethod
    def __calculateBestSample(center, centroid, gridsize, targetAreaRatio):
        # Find the point on the line from centroid through the voxel center
        # such that the perpendicular through it cuts off targetAreaRatio of
        # the voxel square.  Symmetry reductions below canonicalize the
        # geometry to: centroid between S45W and S of center, ratio <= 0.5.
        # const center, const centroid
        center = np.copy(center)
        centroid = np.copy(centroid[[0, 1]])
        d = center - centroid
        # if ratio is more than half
        if targetAreaRatio > 0.5:
            # equivalent to reversing direction and finding complement
            d = -d
            centroid = centroid + d
            targetAreaRatio = 1 - targetAreaRatio
        # if horizontal d
        if abs(d[0]) > abs(d[1]):
            # swap x and y of input
            center[[0, 1]] = center[[1, 0]]
            centroid[[0, 1]] = centroid[[1, 0]]
            yx = EdgeImprover.__calculateBestSample(center, centroid, gridsize, targetAreaRatio)
            # swap x and y of output
            return yx[[1, 0]]
        # if centroid is above
        if d[1] < 0:
            # reflect y of input
            center[1] = -center[1]
            centroid[1] = -centroid[1]
            x_negY = EdgeImprover.__calculateBestSample(center, centroid, gridsize, targetAreaRatio)
            # reflect y of output
            return x_negY * [1, -1]
        # if centroid is to the right
        if d[0] < 0:
            # reflect y of input
            center[0] = -center[0]
            centroid[0] = -centroid[0]
            negX_y = EdgeImprover.__calculateBestSample(center, centroid, gridsize, targetAreaRatio)
            # reflect y of output
            return negX_y * [-1, 1]
        # valid assumption: centroid is between S45W and S, ratio <= 0.5
        halfGrid = gridsize / 2.0
        # m = dy / dx
        md = d[1] / d[0]
        # mx + c = y
        # c = y - mx
        cd = center[1] - md * center[0]
        # `y = h` is a line cutting square in targetAreaRatio
        h = gridsize * targetAreaRatio + center[1] - halfGrid
        # `y = mx + c` is a line cutting square in targetAreaRatio
        # and perpendicular to center - centroid
        m1 = -(d[0] / d[1])
        # mx + c = y
        # c = y - mx
        c1 = h - m1 * center[0]
        # test if `y = mx + c` touches the left and right edge of the square
        leftY = m1 * (center[0] - halfGrid) + c1
        rightY = m1 * (center[0] + halfGrid) + c1
        if all(map(lambda y: center[1] - halfGrid < y < center[1] + halfGrid,
                   [leftY, rightY])):
            # -m1x + y = c1
            # -mdx + y = cd
            # -> [-m1 1; -md 1][x; y] = [c1; cd]
            return np.linalg.solve([[-m1, 1], [-md, 1]], [c1, cd])
        else:
            # area must be triangular
            # let base be bt, height be ht
            # area = bt ht / 2
            # md = bt / ht
            # area = md / 2 * ht^2
            # ht = sqrt(2area / md)
            m2 = m1
            # mx + c = y
            # c = y - mx
            ht = np.sqrt(2 * targetAreaRatio * gridsize**2 / md)
            yt = ht + center[1] - halfGrid
            c2 = yt - m2 * (center[0] - halfGrid)
            xy = np.linalg.solve([[-m2, 1], [-md, 1]], [c2, cd])
            # check if in range
            if not xy[1] < center[1] - halfGrid:
                return xy
            else:
                # triangle too small, point outside of square
                # compromise: return closest point on line
                bt = md * ht
                xt = bt + center[0] - halfGrid
                return np.array([xt, center[1] - halfGrid])
def voxelGridFilter(points, tcoords, gridsize=0.01, improve=True):
    """Downsample (point, texcoord) pairs onto a 2D voxel grid, optionally
    refining boundary voxels with EdgeImprover, and return the per-voxel
    average positions and texture coordinates.

    NOTE(review): Python 2 only — uses ``sys.maxint`` and subscripts the
    result of ``map()``.
    """
    voxels = {}
    maxX, maxY, minX, minY = -sys.maxint - 1, -sys.maxint - 1, sys.maxint, sys.maxint
    for i in range(len(points)):
        # quantize the (x, y) of each point to a grid cell key
        n = tuple(map(lambda x: int(round(x / gridsize)),np.copy(points[i]))[0:2])
        if n not in voxels:
            voxels[n] = []
        minX, maxX = min(n[0], minX), max(n[0], maxX)
        minY, maxY = min(n[1], minY), max(n[1], maxY)
        # store position and texcoord together as one 5-vector
        voxels[n].append(np.hstack((points[i], tcoords[i])))
    if improve:
        # replaces raw edge-voxel samples with calibrated ones (in place)
        EdgeImprover(voxels, gridsize, minX, minY, maxX, maxY).run()
    rp = [np.average(np.array([e[0:3] for e in voxels[n]]), axis=0) for n in voxels]
    rt = [np.average(np.array([e[3:5] for e in voxels[n]]), axis=0) for n in voxels]
    return rp, rt
# demo: sample a noisy ridge surface on a 30x30 grid and compare the raw
# points (red) with voxel filtering without (blue) and with (green) the
# edge-improvement pass
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
pts = 30
r = np.arange(pts, dtype=np.float64) / (pts - 1)
x = np.hstack([r for i in range(pts)]).reshape((pts**2,))
y = np.vstack([r for i in range(pts)]).T.reshape((pts**2,))
z = -((np.abs(x - 0.5) - 0.25)**2) + np.random.normal(0, 0.01, (pts**2,))
ax.scatter(x, y, z, c='r', s=0.5)
# NOTE(review): zip()/map() results are treated as lists below — Python 2
points = zip(x,y,z)
tcoords = zip(x,y)
newPoints, newTcoords = voxelGridFilter(points, tcoords, 0.1, False)
ax.scatter(*([map(lambda p: p[i], newPoints) for i in range(3)]), c='b')
newPoints, newTcoords = voxelGridFilter(points, tcoords, 0.1, True)
ax.scatter(*([map(lambda p: p[i], newPoints) for i in range(3)]), c='g')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| mit |
catalyst-cooperative/pudl | src/pudl/extract/epacems.py | 1 | 8422 | """
Retrieve data from EPA CEMS hourly zipped CSVs.
This modules pulls data from EPA's published CSV files.
"""
import logging
from pathlib import Path
from typing import NamedTuple
from zipfile import ZipFile
import pandas as pd
from pudl.workspace.datastore import Datastore
logger = logging.getLogger(__name__)
# EPA CEMS constants #####
# Maps raw CSV headers (both the old spelled-out-units format and the newer
# bare-name format) to the PUDL column names used downstream.
RENAME_DICT = {
    "STATE": "state",
    # "FACILITY_NAME": "plant_name", # Not reading from CSV
    "ORISPL_CODE": "plant_id_eia",
    "UNITID": "unitid",
    # These op_date, op_hour, and op_time variables get converted to
    # operating_date, operating_datetime and operating_time_interval in
    # transform/epacems.py
    "OP_DATE": "op_date",
    "OP_HOUR": "op_hour",
    "OP_TIME": "operating_time_hours",
    "GLOAD (MW)": "gross_load_mw",
    "GLOAD": "gross_load_mw",
    "SLOAD (1000 lbs)": "steam_load_1000_lbs",
    "SLOAD (1000lb/hr)": "steam_load_1000_lbs",
    "SLOAD": "steam_load_1000_lbs",
    "SO2_MASS (lbs)": "so2_mass_lbs",
    "SO2_MASS": "so2_mass_lbs",
    "SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
    # "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
    # "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
    # "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
    "NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
    "NOX_RATE": "nox_rate_lbs_mmbtu",
    "NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
    "NOX_MASS (lbs)": "nox_mass_lbs",
    "NOX_MASS": "nox_mass_lbs",
    "NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
    "CO2_MASS (tons)": "co2_mass_tons",
    "CO2_MASS": "co2_mass_tons",
    "CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
    # "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
    # "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
    # "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
    "HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
    "HEAT_INPUT": "heat_content_mmbtu",
    "FAC_ID": "facility_id",
    "UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read.
# (The derived rate columns are dropped because PUDL recomputes them.)
IGNORE_COLS = {
    "FACILITY_NAME",
    "SO2_RATE (lbs/mmBtu)",
    "SO2_RATE",
    "SO2_RATE_MEASURE_FLG",
    "CO2_RATE (tons/mmBtu)",
    "CO2_RATE",
    "CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data."""
# Specify dtypes to use when reading the CEMS CSVs.
# Nullable pandas extension dtypes (StringDtype/Int64Dtype) are used where
# missing values may appear; plain float already supports NaN.
CSV_DTYPES = {
    "STATE": pd.StringDtype(),
    # "FACILITY_NAME": str, # Not reading from CSV
    "ORISPL_CODE": pd.Int64Dtype(),
    "UNITID": pd.StringDtype(),
    # These op_date, op_hour, and op_time variables get converted to
    # operating_date, operating_datetime and operating_time_interval in
    # transform/epacems.py
    "OP_DATE": pd.StringDtype(),
    "OP_HOUR": pd.Int64Dtype(),
    "OP_TIME": float,
    "GLOAD (MW)": float,
    "GLOAD": float,
    "SLOAD (1000 lbs)": float,
    "SLOAD (1000lb/hr)": float,
    "SLOAD": float,
    "SO2_MASS (lbs)": float,
    "SO2_MASS": float,
    "SO2_MASS_MEASURE_FLG": pd.StringDtype(),
    # "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
    # "SO2_RATE": float, # Not reading from CSV
    # "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
    "NOX_RATE (lbs/mmBtu)": float,
    "NOX_RATE": float,
    "NOX_RATE_MEASURE_FLG": pd.StringDtype(),
    "NOX_MASS (lbs)": float,
    "NOX_MASS": float,
    "NOX_MASS_MEASURE_FLG": pd.StringDtype(),
    "CO2_MASS (tons)": float,
    "CO2_MASS": float,
    "CO2_MASS_MEASURE_FLG": pd.StringDtype(),
    # "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
    # "CO2_RATE": float, # Not reading from CSV
    # "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
    "HEAT_INPUT (mmBtu)": float,
    "HEAT_INPUT": float,
    "FAC_ID": pd.Int64Dtype(),
    "UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
class EpaCemsPartition(NamedTuple):
    """One (year, state) slice of the EPA CEMS dataset.

    Identifies exactly one zipped resource file held by the datastore.
    """
    year: str
    state: str
    def get_key(self):
        """Return a hashable (year, lowercased state) key for EpaCemsDatastore."""
        year, state = self.year, self.state.lower()
        return (year, state)
    def get_filters(self):
        """Return keyword filters that select this partition in the Datastore."""
        return {"year": self.year, "state": self.state.lower()}
    def get_monthly_file(self, month: int) -> Path:
        """Return the suffix-less name of the file holding data for ``month``."""
        stem = "{}{}{:02}".format(self.year, self.state.lower(), month)
        return Path(stem)
class EpaCemsDatastore:
    """Thin wrapper around :class:`Datastore` for loading EPA CEMS dataframes.

    Each EPA CEMS resource is identified by a year and a state. The resource
    is a zip archive that nests twelve monthly zip files, each holding one
    CSV. :meth:`get_data_frame` stitches those monthly tables together into
    a single dataframe for the (year, state) pair.
    """
    def __init__(self, datastore: Datastore):
        """Wrap an already-initialized :class:`Datastore` instance."""
        self.datastore = datastore
    def get_data_frame(self, partition: EpaCemsPartition) -> pd.DataFrame:
        """Load and concatenate all twelve months of one (year, state) partition."""
        archive = self.datastore.get_zipfile_resource(
            "epacems", **partition.get_filters())
        monthly = []
        for month in range(1, 13):
            stem = partition.get_monthly_file(month)
            inner_zip = str(stem.with_suffix(".zip"))
            inner_csv = str(stem.with_suffix(".csv"))
            # Monthly zips are nested inside the per-partition archive.
            with archive.open(inner_zip, "r") as month_zip:
                with ZipFile(month_zip, "r").open(inner_csv, "r") as csv_file:
                    monthly.append(self._csv_to_dataframe(csv_file))
        return pd.concat(monthly, sort=True, copy=False, ignore_index=True)
    def _csv_to_dataframe(self, csv_file) -> pd.DataFrame:
        """
        Convert a CEMS csv file into a :class:`pandas.DataFrame`.
        Columns listed in ``IGNORE_COLS`` are skipped entirely; the remaining
        columns are cast according to ``CSV_DTYPES`` and renamed according to
        ``RENAME_DICT``.
        Args:
            csv_file (file-like object): data to be read
        Returns:
            pandas.DataFrame: A DataFrame containing the contents of the
            CSV file.
        """
        df = pd.read_csv(
            csv_file,
            index_col=False,
            usecols=lambda col: col not in IGNORE_COLS,
        )
        dtypes = {col: dtype for col, dtype in CSV_DTYPES.items() if col in df.columns}
        return df.astype(dtypes).rename(columns=RENAME_DICT)
def extract(epacems_years, states, ds: Datastore):
    """
    Coordinate the extraction of EPA CEMS hourly DataFrames.
    Args:
        epacems_years (list): The years of CEMS data to extract, as 4-digit
            integers.
        states (list): The states whose CEMS data we want to extract, indicated
            by 2-letter US state codes.
        ds (:class:`Datastore`): Initialized datastore
    Yields:
        dict: a one-item dictionary whose key is an EPA CEMS tabular data
        resource name of the form "hourly_emissions_epacems_YEAR_STATE"
        (YEAR a 4-digit number, STATE a lowercase 2-letter code) and whose
        value is a :class:`pandas.DataFrame` of the raw EPA CEMS hourly
        emissions data for that state and year.
    """
    cems_store = EpaCemsDatastore(ds)
    for year in epacems_years:
        for state in states:
            partition = EpaCemsPartition(state=state, year=year)
            logger.info(f"Performing ETL for EPA CEMS hourly {state}-{year}")
            # Unlike the other extract functions, this one is a generator:
            # each (year, state) pair is yielded as its own one-item dict.
            resource_name = f"hourly_emissions_epacems_{year}_{state.lower()}"
            yield {resource_name: cems_store.get_data_frame(partition)}
| mit |
mwmuni/LIGGGHTS_GUI | networkx/drawing/nx_pylab.py | 10 | 30226 | """
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
    """Draw the graph G with Matplotlib.
    Draw the graph as a simple representation with no node
    labels or edge labels and using the full Matplotlib figure area
    and no axis labels by default. See draw_networkx() for more
    full-featured drawing that allows title, axis labels etc.
    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary, optional
       A dictionary with nodes as keys and positions as values.
       If not specified a spring layout positioning will be computed.
       See networkx.layout for functions that compute node positions.
    ax : Matplotlib Axes object, optional
       Draw the graph in specified Matplotlib axes.
    hold : bool, optional
       Set the Matplotlib hold state. If True subsequent draw
       commands will be added to the current axes.
    kwds : optional keywords
       See networkx.draw_networkx() for a description of optional keywords.
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)
    >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
    See Also
    --------
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    Notes
    -----
    This function has the same name as pylab.draw and pyplot.draw
    so beware when using
    >>> from networkx import *
    since you might overwrite the pylab.draw function.
    With pyplot use
    >>> import matplotlib.pyplot as plt
    >>> import networkx as nx
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)  # networkx draw()
    >>> plt.draw()  # pyplot draw()
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    # Reuse the current figure unless the caller supplied an Axes.
    if ax is None:
        cf = plt.gcf()
    else:
        cf = ax.get_figure()
    cf.set_facecolor('w')
    if ax is None:
        # NOTE(review): cf._axstack() is a private Matplotlib API -- confirm
        # it still exists in the Matplotlib version being targeted.
        if cf._axstack() is None:
            ax = cf.add_axes((0, 0, 1, 1))
        else:
            ax = cf.gca()
    # Only turn labels on automatically when the caller supplied a labels dict.
    if 'with_labels' not in kwds:
        kwds['with_labels'] = 'labels' in kwds
    # NOTE(review): plt.ishold()/plt.hold() were removed in Matplotlib 3.0;
    # this block assumes an older Matplotlib release.
    b = plt.ishold()
    # allow callers to override the hold state by passing hold=True|False
    h = kwds.pop('hold', None)
    if h is not None:
        plt.hold(h)
    try:
        draw_networkx(G, pos=pos, ax=ax, **kwds)
        ax.set_axis_off()
        plt.draw_if_interactive()
    except:
        # Restore the caller's hold state before propagating the error.
        plt.hold(b)
        raise
    plt.hold(b)
    return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
    """Draw the graph G using Matplotlib.
    Draw the graph with Matplotlib with options for node positions,
    labeling, titles, and many other drawing features.
    See draw() for simple drawing without labels or axes.
    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary, optional
       A dictionary with nodes as keys and positions as values.
       If not specified a spring layout positioning will be computed.
       See networkx.layout for functions that compute node positions.
    arrows : bool, optional (default=True)
       For directed graphs, if True draw arrowheads.
    with_labels :  bool, optional (default=True)
       Set to True to draw labels on the nodes.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    nodelist : list, optional (default G.nodes())
       Draw only specified nodes
    edgelist : list, optional (default=G.edges())
       Draw only specified edges
    node_size : scalar or array, optional (default=300)
       Size of nodes.  If an array is specified it must be the
       same length as nodelist.
    node_color : color string, or array of floats, (default='r')
       Node color. Can be a single color format string,
       or a  sequence of colors with the same length as nodelist.
       If numeric values are specified they will be mapped to
       colors using the cmap and vmin,vmax parameters.  See
       matplotlib.scatter for more details.
    node_shape :  string, optional (default='o')
       The shape of the node.  Specification is as matplotlib.scatter
       marker, one of 'so^>v<dph8'.
    alpha : float, optional (default=1.0)
       The node and edge transparency
    cmap : Matplotlib colormap, optional (default=None)
       Colormap for mapping intensities of nodes
    vmin,vmax : float, optional (default=None)
       Minimum and maximum for node colormap scaling
    linewidths : [None | scalar | sequence]
       Line width of symbol border (default =1.0)
    width : float, optional (default=1.0)
       Line width of edges
    edge_color : color string, or array of floats (default='r')
       Edge color. Can be a single color format string,
       or a sequence of colors with the same length as edgelist.
       If numeric values are specified they will be mapped to
       colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    edge_cmap : Matplotlib colormap, optional (default=None)
       Colormap for mapping intensities of edges
    edge_vmin,edge_vmax : floats, optional (default=None)
       Minimum and maximum for edge colormap scaling
    style : string, optional (default='solid')
       Edge line style (solid|dashed|dotted,dashdot)
    labels : dictionary, optional (default=None)
       Node labels in a dictionary keyed by node of text labels
    font_size : int, optional (default=12)
       Font size for text labels
    font_color : string, optional (default='k' black)
       Font color string
    font_weight : string, optional (default='normal')
       Font weight
    font_family : string, optional (default='sans-serif')
       Font family
    label : string, optional
        Label for graph legend
    Notes
    -----
    For directed graphs, "arrows" (actually just thicker stubs) are drawn
    at the head end.  Arrows can be turned off with keyword arrows=False.
    Yes, it is ugly but drawing proper arrows with Matplotlib this
    way is tricky.
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)
    >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
    >>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn of axis
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    See Also
    --------
    draw()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if pos is None:
        pos = nx.drawing.spring_layout(G)  # default to spring layout
    # kwds are forwarded wholesale to each helper; each helper picks out
    # the keywords it understands and ignores the rest via **kwds.
    node_collection = draw_networkx_nodes(G, pos, **kwds)
    edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
    if with_labels:
        draw_networkx_labels(G, pos, **kwds)
    plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
                        nodelist=None,
                        node_size=300,
                        node_color='r',
                        node_shape='o',
                        alpha=1.0,
                        cmap=None,
                        vmin=None,
                        vmax=None,
                        ax=None,
                        linewidths=None,
                        label=None,
                        **kwds):
    """Draw the nodes of the graph G.
    This draws only the nodes of the graph G.
    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    nodelist : list, optional
       Draw only specified nodes (default G.nodes())
    node_size : scalar or array
       Size of nodes (default=300).  If an array is specified it must be the
       same length as nodelist.
    node_color : color string, or array of floats
       Node color. Can be a single color format string (default='r'),
       or a  sequence of colors with the same length as nodelist.
       If numeric values are specified they will be mapped to
       colors using the cmap and vmin,vmax parameters.  See
       matplotlib.scatter for more details.
    node_shape :  string
       The shape of the node.  Specification is as matplotlib.scatter
       marker, one of 'so^>v<dph8' (default='o').
    alpha : float
       The node transparency (default=1.0)
    cmap : Matplotlib colormap
       Colormap for mapping intensities of nodes (default=None)
    vmin,vmax : floats
       Minimum and maximum for node colormap scaling (default=None)
    linewidths : [None | scalar | sequence]
       Line width of symbol border (default =1.0)
    label : [None| string]
       Label for legend
    Returns
    -------
    matplotlib.collections.PathCollection
        `PathCollection` of the nodes.
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if nodelist is None:
        nodelist = G.nodes()
    # Nothing to draw for an empty node list; mirror matplotlib by
    # returning None rather than an empty collection.
    if not nodelist or len(nodelist) == 0:  # empty nodelist, no drawing
        return None
    try:
        # Build an (n, 2) array of node positions in nodelist order.
        xy = numpy.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise nx.NetworkXError('Node %s has no position.'%e)
    except ValueError:
        raise nx.NetworkXError('Bad value in node positions.')
    node_collection = ax.scatter(xy[:, 0], xy[:, 1],
                                 s=node_size,
                                 c=node_color,
                                 marker=node_shape,
                                 cmap=cmap,
                                 vmin=vmin,
                                 vmax=vmax,
                                 alpha=alpha,
                                 linewidths=linewidths,
                                 label=label)
    # Nodes draw above edges (edges use zorder 1, see draw_networkx_edges).
    node_collection.set_zorder(2)
    return node_collection
def draw_networkx_edges(G, pos,
                        edgelist=None,
                        width=1.0,
                        edge_color='k',
                        style='solid',
                        alpha=1.0,
                        edge_cmap=None,
                        edge_vmin=None,
                        edge_vmax=None,
                        ax=None,
                        arrows=True,
                        label=None,
                        **kwds):
    """Draw the edges of the graph G.
    This draws only the edges of the graph G.
    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    edgelist : collection of edge tuples
       Draw only specified edges(default=G.edges())
    width : float, or array of floats
       Line width of edges (default=1.0)
    edge_color : color string, or array of floats
       Edge color. Can be a single color format string (default='r'),
       or a sequence of colors with the same length as edgelist.
       If numeric values are specified they will be mapped to
       colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    style : string
       Edge line style (default='solid') (solid|dashed|dotted,dashdot)
    alpha : float
       The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
       Colormap for mapping intensities of edges (default=None)
    edge_vmin,edge_vmax : floats
       Minimum and maximum for edge colormap scaling (default=None)
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    arrows : bool, optional (default=True)
       For directed graphs, if True draw arrowheads.
    label : [None| string]
       Label for legend
    Returns
    -------
    matplotlib.collection.LineCollection
        `LineCollection` of the edges
    Notes
    -----
    For directed graphs, "arrows" (actually just thicker stubs) are drawn
    at the head end.  Arrows can be turned off with keyword arrows=False.
    Yes, it is ugly but drawing proper arrows with Matplotlib this
    way is tricky.
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        from matplotlib.colors import colorConverter, Colormap
        from matplotlib.collections import LineCollection
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if edgelist is None:
        edgelist = G.edges()
    if not edgelist or len(edgelist) == 0:  # no edges!
        return None
    # set edge positions
    edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
    # LineCollection wants a sequence of line widths.
    if not cb.iterable(width):
        lw = (width,)
    else:
        lw = width
    # Normalize edge_color into a tuple of RGBA tuples, or leave
    # edge_colors as None when numeric values should be mapped through
    # edge_cmap further below.
    if not cb.is_string_like(edge_color) \
           and cb.iterable(edge_color) \
           and len(edge_color) == len(edge_pos):
        if numpy.alltrue([cb.is_string_like(c)
                         for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif numpy.alltrue([not cb.is_string_like(c)
                           for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
                             for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must consist of either color names or numbers')
    else:
        if cb.is_string_like(edge_color) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')
    edge_collection = LineCollection(edge_pos,
                                     colors=edge_colors,
                                     linewidths=lw,
                                     antialiaseds=(1,),
                                     linestyle=style,
                                     transOffset = ax.transData,
                                     )
    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)
    # Note: there was a bug in mpl regarding the handling of alpha values for
    # each line in a LineCollection.  It was fixed in matplotlib in r7184 and
    # r7189 (June 6 2009).  We should then not set the alpha value globally,
    # since the user can instead provide per-edge alphas now.  Only set it
    # globally if provided as a scalar.
    if cb.is_numlike(alpha):
        edge_collection.set_alpha(alpha)
    # edge_colors is None only when numeric values were supplied; map them
    # through the colormap here.
    if edge_colors is None:
        if edge_cmap is not None:
            assert(isinstance(edge_cmap, Colormap))
        edge_collection.set_array(numpy.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()
    arrow_collection = None
    if G.is_directed() and arrows:
        # a directed graph hack
        # draw thick line segments at head end of edge
        # waiting for someone else to implement arrows that will work
        arrow_colors = edge_colors
        a_pos = []
        p = 1.0-0.25  # make head segment 25 percent of edge length
        for src, dst in edge_pos:
            x1, y1 = src
            x2, y2 = dst
            dx = x2-x1   # x offset
            dy = y2-y1   # y offset
            d = numpy.sqrt(float(dx**2 + dy**2))  # length of edge
            if d == 0:   # source and target at same position
                continue
            if dx == 0:  # vertical edge
                xa = x2
                ya = dy*p+y1
            if dy == 0:  # horizontal edge
                ya = y2
                xa = dx*p+x1
            else:
                theta = numpy.arctan2(dy, dx)
                xa = p*d*numpy.cos(theta)+x1
                ya = p*d*numpy.sin(theta)+y1
            a_pos.append(((xa, ya), (x2, y2)))
        arrow_collection = LineCollection(a_pos,
                                          colors=arrow_colors,
                                          linewidths=[4*ww for ww in lw],
                                          antialiaseds=(1,),
                                          transOffset = ax.transData,
                                          )
        arrow_collection.set_zorder(1)  # edges go behind nodes
        arrow_collection.set_label(label)
        ax.add_collection(arrow_collection)
    # update view so the new collections fall inside the data limits
    minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
    maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
    miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
    maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
    w = maxx-minx
    h = maxy-miny
    padx,  pady = 0.05*w, 0.05*h
    corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
    ax.update_datalim(corners)
    ax.autoscale_view()
#    if arrow_collection:
    return edge_collection
def draw_networkx_labels(G, pos,
                         labels=None,
                         font_size=12,
                         font_color='k',
                         font_family='sans-serif',
                         font_weight='normal',
                         alpha=1.0,
                         bbox=None,
                         ax=None,
                         **kwds):
    """Draw node labels on the graph G.
    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    labels : dictionary, optional (default=None)
       Node labels in a dictionary keyed by node of text labels
    font_size : int
       Font size for text labels (default=12)
    font_color : string
       Font color string (default='k' black)
    font_family : string
       Font family (default='sans-serif')
    font_weight : string
       Font weight (default='normal')
    alpha : float
       The text transparency (default=1.0)
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    Returns
    -------
    dict
        `dict` of labels keyed on the nodes
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if labels is None:
        labels = dict((n, n) for n in G.nodes())
    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')
    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        (x, y) = pos[n]
        if not cb.is_string_like(label):
            label = str(label)  # this will cause "1" and 1 to be labeled the same
        t = ax.text(x, y,
                    label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,  # bug fix: alpha was accepted but never applied
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    bbox=bbox,
                    clip_on=True,
                    )
        text_items[n] = t
    return text_items
def draw_networkx_edge_labels(G, pos,
                              edge_labels=None,
                              label_pos=0.5,
                              font_size=10,
                              font_color='k',
                              font_family='sans-serif',
                              font_weight='normal',
                              alpha=1.0,
                              bbox=None,
                              ax=None,
                              rotate=True,
                              **kwds):
    """Draw edge labels.
    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    alpha : float
       The text transparency (default=1.0)
    edge_labels : dictionary
       Edge labels in a dictionary keyed by edge two-tuple of text
       labels (default=None). Only labels for the keys in the dictionary
       are drawn.
    label_pos : float
       Position of edge label along edge (0=head, 0.5=center, 1=tail)
    font_size : int
       Font size for text labels (default=12)
    font_color : string
       Font color string (default='k' black)
    font_weight : string
       Font weight (default='normal')
    font_family : string
       Font family (default='sans-serif')
    bbox : Matplotlib bbox
       Specify text box shape and colors.
    clip_on : bool
       Turn on clipping at axis boundaries (default=True)
    Returns
    -------
    dict
        `dict` of labels keyed on the edges
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if edge_labels is None:
        # Default to each edge's full data dict as its label.
        labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
    else:
        labels = edge_labels
    # Loop-invariant settings hoisted out of the per-edge loop.
    if bbox is None:
        # use default box of white with white border
        bbox = dict(boxstyle='round',
                    ec=(1.0, 1.0, 1.0),
                    fc=(1.0, 1.0, 1.0),
                    )
    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')
    text_items = {}
    for (n1, n2), label in labels.items():
        (x1, y1) = pos[n1]
        (x2, y2) = pos[n2]
        # Interpolate the label position along the edge.
        (x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
                  y1 * label_pos + y2 * (1.0 - label_pos))
        if rotate:
            angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360  # degrees
            # make label orientation "right-side-up"
            if angle > 90:
                angle -= 180
            if angle < - 90:
                angle += 180
            # transform data coordinate angle to screen coordinate angle
            xy = numpy.array((x, y))
            trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
                                                        xy.reshape((1, 2)))[0]
        else:
            trans_angle = 0.0
        if not cb.is_string_like(label):
            label = str(label)  # this will cause "1" and 1 to be labeled the same
        t = ax.text(x, y,
                    label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,  # bug fix: alpha was accepted but never applied
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    rotation=trans_angle,
                    transform=ax.transData,
                    bbox=bbox,
                    zorder=1,
                    clip_on=True,
                    )
        text_items[(n1, n2)] = t
    return text_items
def draw_circular(G, **kwargs):
    """Draw the graph ``G`` using a circular node layout.
    Parameters
    ----------
    G : graph
       A networkx graph
    kwargs : optional keywords
       Forwarded to networkx.draw_networkx(); see it for the available
       options. A ``pos`` keyword is not used by this function, which
       always computes its own circular layout.
    """
    pos = circular_layout(G)
    draw(G, pos, **kwargs)
def draw_random(G, **kwargs):
    """Draw the graph ``G`` using randomly chosen node positions.
    Parameters
    ----------
    G : graph
       A networkx graph
    kwargs : optional keywords
       Forwarded to networkx.draw_networkx(); see it for the available
       options. A ``pos`` keyword is not used by this function, which
       always computes its own random layout.
    """
    pos = random_layout(G)
    draw(G, pos, **kwargs)
def draw_spectral(G, **kwargs):
    """Draw the graph ``G`` using a spectral (Laplacian eigenvector) layout.
    Parameters
    ----------
    G : graph
       A networkx graph
    kwargs : optional keywords
       Forwarded to networkx.draw_networkx(); see it for the available
       options. A ``pos`` keyword is not used by this function, which
       always computes its own spectral layout.
    """
    pos = spectral_layout(G)
    draw(G, pos, **kwargs)
def draw_spring(G, **kwargs):
    """Draw the graph ``G`` using a spring (force-directed) layout.
    Parameters
    ----------
    G : graph
       A networkx graph
    kwargs : optional keywords
       Forwarded to networkx.draw_networkx(); see it for the available
       options. A ``pos`` keyword is not used by this function, which
       always computes its own spring layout.
    """
    pos = spring_layout(G)
    draw(G, pos, **kwargs)
def draw_shell(G, **kwargs):
    """Draw the graph ``G`` using a shell (concentric circles) layout.
    Parameters
    ----------
    G : graph
       A networkx graph
    kwargs : optional keywords
       Forwarded to networkx.draw_networkx(); see it for the available
       options. An ``nlist`` keyword, if present, is consumed here and
       passed to shell_layout() as the list of node shells; a ``pos``
       keyword is not used by this function.
    """
    # Pop nlist so it is not forwarded to the drawing routines.
    shells = kwargs.pop('nlist', None)
    pos = shell_layout(G, nlist=shells)
    draw(G, pos, **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
    """Draw the graph ``G`` with node positions computed by Graphviz.
    Parameters
    ----------
    G : graph
       A networkx graph
    prog : string, optional
       Name of the Graphviz layout program to run (default "neato").
    kwargs : optional keywords
       Forwarded to networkx.draw_networkx(); see it for the available
       options.
    """
    layout = nx.drawing.graphviz_layout(G, prog)
    draw(G, layout, **kwargs)
def draw_nx(G, pos, **kwds):
    """Deprecated alias kept for backward compatibility; use draw() or
    draw_networkx() instead."""
    return draw(G, pos, **kwds)
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when matplotlib is unusable.

    Catches both a plain ImportError and backend-selection failures, but —
    unlike the previous bare ``except:`` — no longer swallows
    KeyboardInterrupt/SystemExit.
    """
    from nose import SkipTest
    try:
        import matplotlib as mpl
        mpl.use('PS', warn=False)
        import matplotlib.pyplot as plt
    except Exception:
        raise SkipTest("matplotlib not available")
| gpl-3.0 |
Becksteinlab/PDB_Ion_Survey | src/pdbionsurvey/bulkcoord.py | 1 | 6385 |
# Restored truncated statement: the leading "fro" of "from" was lost.
from __future__ import division
import MDAnalysis as mda
import datreant as dtr
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pdbionsurvey.coordination
import json
import seaborn as sb
import scipy
import mmtf
import pdbionsurvey.collection
from matplotlib.ticker import MaxNLocator
# import pdbionsurvey.analysis
from os import path as pth
import os
import shutil
from glob import glob
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import mmtf
def individualdee(prot, ionname='NA', atomname='O', bs=.1, mindistance=True, maxdistance=15, ts=1):
    """Write a per-ion radial density profile CSV for each ion in a protein.

    For every ion named/resnamed ``ionname`` in the protein's PDB, histogram
    the ion--atom distances found in the precomputed coordination CSVs into
    shells of width ``bs`` and convert counts to number densities, writing one
    output CSV per ion under ``csvpath/individual/``.

    Parameters
    ----------
    prot : Treant-like object for one protein; must expose ``<name>.pdb`` and
        ``coordination/<ION>/<atom>/*.csv`` members.
    ionname : ion atom/residue name to select (default ``'NA'``).
    atomname : coordinating atom name used in the CSV path (default ``'O'``).
    bs : histogram bin width (presumably Angstrom -- TODO confirm).
    mindistance : if truthy, densities at radii <= 0.5 are zeroed and the
        output filename gets a ``-withmin`` suffix; the parameter is rebound
        to 0.5 inside the loop.
    maxdistance : shells beyond this radius are discarded.
    ts : unused in this function -- TODO confirm before removing.

    Notes
    -----
    NOTE(review): ``frames`` is rebuilt from the same glob on every loop
    iteration and does not depend on ``ion``, so every ion of this type gets
    an identical profile (only the filename's resnum differs) -- confirm intent.
    NOTE(review): ``csvpath`` is a module-level object defined elsewhere in
    this module.
    """
    u = mda.Universe(prot[prot.name+'.pdb'].abspath)
    ions = u.select_atoms('name '+ionname+' and resname '+ionname)
    for ion in ions:
        frames = []
        for csv in prot.glob('coordination/'+ionname.upper()+'/'+atomname+'/*.csv'):
            df = pd.read_csv(csv.abspath)
            frames.append(df)
        dataframe = pd.concat(frames)
        # Shell edges from 0 to the largest observed distance, width bs.
        h, e = np.histogram(dataframe['distance'], bins=np.arange(0, max(dataframe['distance']), bs))
        # Shell midpoints.
        m = .5 * (e[:-1] + e[1:])
        # Spherical-shell volumes, for converting counts to number density.
        V = 4. / 3 * np.pi * (e[1:] ** 3 - e[:-1] ** 3)
        density = h / V
        gdf = pd.DataFrame({'radius': m, 'density': density}, columns=['radius', 'density'])
        gdf = gdf[gdf['radius'] < maxdistance]
        if not mindistance:
            gdf.to_csv(csvpath.abspath+'individual/d-'+prot.name+'-'+ionname.upper()+'-'+str(ion.resnum)+'-'+atomname+'-'+str(int(bs*100))+'pmbins.csv')
        else:
            # Suppress unphysical densities within 0.5 of the ion center.
            mindistance = .5
            gdf['density'] = [gdf['density'][i] if gdf['radius'][i]>.5 else 0 for i in range(len(gdf['density']))]
            gdf.to_csv(csvpath.abspath+'individual/d-'+prot.name+'-'+ionname.upper()+'-'+str(ion.resnum)+'-'+atomname+'-'+str(int(bs*100))+'pmbins-withmin.csv')
def make_dees(ionname, atomnames=ATOMNAMES, bs=.1, mindistance=True, maxdistance=15, ts=1):
    """Compute and save the pooled number-density profile d(r) for *ionname*.

    One CSV is written per atom name in *atomnames*, using the module-level
    bundle ``b`` as input and the ``csvpath`` Treant as output directory.
    With *mindistance* truthy the density below 0.5 A is zeroed and the
    '-withmin' file variant is written instead.
    """
    for atomname in atomnames:
        print('started d '+ionname+' with '+atomname)
        profile = pdbionsurvey.coordination.gee(b, ionname, atomname=atomname, binsize=bs)
        profile = profile[profile['radius'] < maxdistance]
        print('made d '+ionname+' with '+atomname)
        if not mindistance:
            profile.to_csv(csvpath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins.csv')
        else:
            mindistance = .5
            profile['density'] = [profile['density'][i] if profile['radius'][i] > mindistance else 0
                                  for i in range(len(profile['density']))]
            profile.to_csv(csvpath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-withmin.csv')
        print('saved d '+ionname+' with '+atomname)
def getbins(num):
    """Pick a tick spacing of roughly one tenth of *num*.

    For ranges of ten or more this is simply ``int(num / 10)``; for smaller
    ranges the raw tenth is snapped down to one of {0.5, 0.2, 0.1}.
    """
    spacing = int(num / 10)
    if spacing == 0:
        # range below ten units: fall back to the un-truncated tenth
        spacing = num / 10
    if .5 <= spacing < 1:
        spacing = .5
    elif .2 <= spacing < .5:
        spacing = .2
    elif spacing < .2:
        spacing = .1
    return spacing
def make_gees(ionname, atomname='O', maxdistance=15, bs=.1, bundle=b, path=csvpath):
    """Plot the number density d(r) and radial distribution g(r) for an ion.

    Reads the previously written 'd-...-withmin' profile from *path*, plots
    the density and the bulk-normalised g(r), saves PNG/PDF figures under the
    module-level ``impath`` and writes the g(r) data to ``csvpath``.

    Parameters
    ----------
    ionname, atomname : select which precomputed profile to load.
    maxdistance : radial cutoff in Angstrom for the plots.
    bs : bin width used when the profile was generated (file-name component).
    bundle : unused; kept for interface compatibility.
    path : Treant holding the input CSVs.

    NOTE(review): also depends on module-level ``propernames``, ``impath``
    and ``bulkdensity`` -- confirm they are defined before calling.
    """
    fig = plt.figure(figsize=(4,3))
    ax = fig.add_subplot(111)
    fig.set_tight_layout(True)
    fig1 = plt.figure(figsize=(4,3))
    ax1 = fig1.add_subplot(111)
    fig1.set_tight_layout(True)
    print('started g '+ionname+' with '+atomname)
    gdf = pd.read_csv(path.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-withmin.csv')
    # BUG FIX: the condition was inverted (``< .5``), which zeroed the
    # density everywhere *beyond* 0.5 A and kept only the unphysical short
    # contacts.  Zero below the 0.5 A cutoff instead, as in make_dees().
    gdf['density'] = [gdf['density'][i] if gdf['radius'][i]>.5 else 0 for i in range(len(gdf['density']))]
    gdf = gdf[gdf['radius'] < maxdistance]
    ax.plot(gdf['radius'], gdf['density'], label=propernames[ionname], linewidth=2)
    yts = getbins(max(gdf['density']))
    ts = getbins(max(gdf['radius']))
    ax.set_xlabel(r'distance ($\mathrm{\AA}$)')
    ax.set_ylabel(r'density ($\mathrm{\AA}^{-3}$)')
    ax.xaxis.set_major_locator(MultipleLocator(5*ts))
    ax.xaxis.set_minor_locator(MultipleLocator(ts))
    ax1.yaxis.set_major_locator(MultipleLocator(.005))
    ax1.yaxis.set_minor_locator(MultipleLocator(yts*.005))
    # BUG FIX: seaborn is imported as ``sb`` at module level; ``sns`` was a
    # NameError.
    sb.despine(offset=10, ax=ax)
    ax.legend()
    ax.figure.savefig(impath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.png')
    ax.figure.savefig(impath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.pdf')
    # normalise by the bulk density of this atom type to obtain g(r)
    y = gdf['density']/bulkdensity[atomname]
    ax1.plot(gdf['radius'], y, label=propernames[ionname], linewidth=2)
    yts = getbins(max(y))
    ts = getbins(max(gdf['radius']))
    ax1.set_xlabel(r'distance ($\mathrm{\AA}$)')
    ax1.set_ylabel(r'$g(r)$')
    ax1.xaxis.set_major_locator(MultipleLocator(5*ts))
    ax1.xaxis.set_minor_locator(MultipleLocator(ts))
    ax1.yaxis.set_major_locator(MultipleLocator(5*yts))
    ax1.yaxis.set_minor_locator(MultipleLocator(yts))
    ax1.set_xlim(0, maxdistance)
    ax1.set_ylim(0, max(y))
    sb.despine(offset=10, ax=ax1)
    ax1.legend()
    # reference line at g(r) = 1 (bulk)
    ax1.plot(gdf['radius'], np.array([1 for i in range(len(gdf['radius']))]), color=(0,0,0), ls='dotted', alpha=.5)
    ax1.figure.savefig(impath.abspath+'g-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.png')
    ax1.figure.savefig(impath.abspath+'g-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.pdf')
    df = pd.DataFrame({'radius': gdf['radius'], 'density': y}, columns=['radius', 'density'])
    df.to_csv(csvpath.abspath+'g-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.csv')
    print('finished g '+ionname+' with '+atomname)
# Module-level defaults for the ion / coordinating-atom pair analysed by
# individualen() below.
IONNAME = 'CL'
ATOMNAME = 'O'
def individualen(prot, ionname=IONNAME, atomname=ATOMNAME, bs=.1, mindistance=True, ts=1):
    """Count coordinating atoms inside the first solvation shell per ion.

    The shell radius is taken from the module-level ``shells`` table
    ('first min' entry for this ion/atom pair).  One coordination number is
    computed per per-ion CSV under coordination/<ION>/<atom>/ and stored in
    the Treant's categories; the list of all numbers is stored as well.

    Returns the list of coordination numbers.
    """
    # BUG FIX: the shell lookup and the category keys previously used the
    # module-level IONNAME/ATOMNAME constants, silently ignoring the
    # ionname/atomname arguments; use the parameters throughout.
    shellsize = shells['first min'][(ionname.upper(), atomname)]
    maxdistance = shellsize
    # Loaded only as a sanity check that the structure file exists/parses;
    # the Universe itself is not used below.
    u = mda.Universe(prot[prot.name+'.pdb'].abspath)
    coordnums = []
    for csv in prot.glob('coordination/'+ionname.upper()+'/'+atomname+'/*.csv'):
        df = pd.read_csv(csv.abspath)
        try:
            gdf = df[df['distance'] < maxdistance]
            if mindistance:
                # discard unphysical contacts closer than 0.5 A
                gdf = gdf[gdf['distance'] > .5]
            coordnum = len(gdf['distance'])
        except TypeError:
            # malformed CSV: skip this ion (no value is recorded for it)
            continue
        coordnums.append(coordnum)
        atomnum = csv.name[:-4]
        prot.categories['coordnum_'+atomnum] = coordnum
    prot.categories[ionname.upper()+'_'+atomname+'_coordnums'] = str(coordnums)
    return coordnums
| gpl-3.0 |
davidgbe/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Modified Huber loss as a function of the margin ``z = y_pred * y_true``.

    Quadratically smoothed hinge ``(1 - z)**2`` for ``-1 <= z < 1``, zero for
    ``z >= 1`` and the linear continuation ``-4 * z`` for ``z < -1``.
    Vectorized over numpy arrays.
    """
    margin = y_pred * y_true
    hinge_sq = np.where(margin >= 1.0, 0.0, (1.0 - margin) ** 2)
    return np.where(margin >= -1.0, hinge_sq, -4.0 * margin)
# Plot the zero-one loss together with the convex surrogate losses used by
# SGDClassifier, each as a function of the decision value f(x) for y = 1.
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
# step function: 1 for f(x) < 0, 0 otherwise
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
Galithil/status | status/clusters_per_lane.py | 2 | 2547 | import tornado.web
import json
import cStringIO
import matplotlib.gridspec as gridspec
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from status.util import SafeHandler
class ClustersPerLaneHandler(SafeHandler):
    """Render the clusters-per-lane page.

    The default date interval spans from the first day of the earliest
    flowcell's month to the last day of the latest flowcell's month.
    """
    def get(self):
        lane_docs = self.application.flowcells_db.view("lanes/clusters")
        # keys start with a YYMMDD-style run date; keep YYMM and pin the day
        default_start = lane_docs.rows[0].key[0][:4] + '01'
        default_end = lane_docs.rows[-1].key[0][:4] + '31'
        template = self.application.loader.load("clusters_per_lane.html")
        self.write(template.generate(gs_globals=self.application.gs_globals,
                                     user=self.get_current_user_name(),
                                     start=default_start,
                                     end=default_end))
class ClustersPerLanePlotHandler(SafeHandler):
    """ Serves a plot of distribution of lane read production for a provided
    time interval.
    Loaded through /api/v1/plot/clusters_per_lane.png
    """
    def get(self):
        # Optional query parameters bounding the flowcell date range.
        start = self.get_argument("start", "")
        end = self.get_argument("end", "Z")
        lanes = self.application.flowcells_db.view("lanes/clusters")
        # Collect the filtered-cluster count of every lane in the interval.
        yields_per_lane = []
        for lane in lanes[[start, ""]:[end, "Z"]]:
            y = lane.value.get("filtered_clusters")
            if y:
                yields_per_lane.append(y)
        if not yields_per_lane:
            self.set_header("Content-Type", "text/html")
            self.write("No flowcells found in this time frame.")
        else:
            # Stacked layout: histogram on top, box plot on the bottom rows.
            gs = gridspec.GridSpec(16, 1)
            fig = Figure(figsize=[10, 8])
            portion = 2
            ax = fig.add_subplot(gs[:-portion, 0])
            ax.hist(yields_per_lane, bins=32)
            ax.grid(b='on')
            ax.set_ylabel("Lanes")
            ax.spines["bottom"].set_color('none')
            ax.get_xaxis().set_visible(False)
            # Box plot sharing the same x scale directly below the histogram.
            ax = fig.add_subplot(gs[-portion:, 0])
            ax.grid(b='on', axis='x')
            ax.boxplot(yields_per_lane, vert=False, patch_artist=True)
            ax.set_xlabel("Clusters")
            ax.get_yaxis().set_visible(False)
            ax.spines["top"].set_linewidth(2)
            fig.subplots_adjust(hspace=0)
            # Render to PNG in memory and send it as the response body.
            # NOTE: cStringIO implies this module targets Python 2.
            FigureCanvasAgg(fig)
            buf = cStringIO.StringIO()
            fig.savefig(buf, format="png")
            data = buf.getvalue()
            self.set_header("Content-Type", "image/png")
            self.set_header("Content-Length", len(data))
            self.write(data)
| mit |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/fill_betweenx_demo.py | 1 | 2297 | """
==================
Fill Betweenx Demo
==================
Using ``fill_betweenx`` to color between two horizontal curves.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
    # were in nodebox
    # NOTE(review): size(), image(), imagesize() and HEIGHT below are
    # presumably built-ins/globals injected by the NodeBox runtime -- confirm.
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create (and immediately close) a named temp file so that savefig
        # can reopen it by path on all platforms.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Render the current matplotlib figure to a temp PNG, paste it onto
        # the NodeBox canvas and grow the canvas to fit.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)
else:
    # Plain Python: just show the figure interactively.
    def pltshow(mplpyplot):
        mplpyplot.show()
# nodebox section end
# Two curves parameterised by y, filled horizontally between them.
y = np.arange(0.0, 2, 0.01)
x1 = np.sin(2 * np.pi * y)
x2 = 1.2 * np.sin(4 * np.pi * y)
fig, [ax1, ax2, ax3] = plt.subplots(3, 1, sharex=True)
ax1.fill_betweenx(y, 0, x1)
ax1.set_ylabel('(x1, 0)')
ax2.fill_betweenx(y, x1, 1)
ax2.set_ylabel('(x1, 1)')
ax3.fill_betweenx(y, x1, x2)
ax3.set_ylabel('(x1, x2)')
ax3.set_xlabel('x')
# now fill between x1 and x2 where a logical condition is met. Note
# this is different than calling
# fill_between(y[where], x1[where], x2[where])
# because of edge effects over multiple contiguous regions.
fig, [ax, ax1] = plt.subplots(2, 1, sharex=True)
ax.plot(x1, y, x2, y, color='black')
ax.fill_betweenx(y, x1, x2, where=x2 >= x1, facecolor='green')
ax.fill_betweenx(y, x1, x2, where=x2 <= x1, facecolor='red')
ax.set_title('fill between where')
# Test support for masked arrays.
x2 = np.ma.masked_greater(x2, 1.0)
ax1.plot(x1, y, x2, y, color='black')
ax1.fill_betweenx(y, x1, x2, where=x2 >= x1, facecolor='green')
ax1.fill_betweenx(y, x1, x2, where=x2 <= x1, facecolor='red')
ax1.set_title('Now regions with x2 > 1 are masked')
# This example illustrates a problem; because of the data
# gridding, there are undesired unfilled triangles at the crossover
# points. A brute-force solution would be to interpolate all
# arrays to a very fine grid before plotting.
pltshow(plt)
| mit |
sinamoeini/mapp4py | doc/src/conf.py | 1 | 3581 | from __future__ import division, absolute_import, print_function
import sys, os, re, numpydoc
import sphinx
# Guard against a too-old Sphinx.
# BUG FIX: the original compared version *strings* lexicographically, so
# e.g. "1.10.0" < "1.0.1" evaluated True; compare numeric component tuples.
def _version_tuple(version_string):
    """Return the leading numeric dotted components of a version string."""
    parts = []
    for part in version_string.split('.'):
        m = re.match(r'\d+', part)
        if m is None:
            break
        parts.append(int(m.group()))
    return tuple(parts)

if _version_tuple(sphinx.__version__) < (1, 0, 1):
    raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Make the bundled sphinx extensions importable.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath','numpydoc',
              'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.autosummary',
              'matplotlib.sphinxext.plot_directive','sphinxcontrib.bibtex',
              ]
templates_path = ['__templates']
source_suffix = '.rst'
project = u'MAPP'
copyright = u'2017, Sina Moeini'
author = u'Sina Moeini'
version = u'beta'
release = u'beta'
master_doc = 'index'
add_function_parentheses = False
# numpydoc behaviour switches
numpydoc_edit_link = False
numpydoc_use_plots = False
numpydoc_show_class_members = True
numpydoc_class_members_toctree = True
numpydoc_show_inherited_class_members = True
today_fmt = '%B %d, %Y'
default_role = "autolink"
exclude_dirs = []
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The scipy-sphinx-theme is vendored as a git submodule one level up.
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
    raise RuntimeError("Get the scipy-sphinx-theme first, "
                       "via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
html_theme_options = {
    "edit_link": False,
    "sidebar": "left",
    "scipy_org_logo": False,
    "rootlinks": []
}
#html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['__static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
html_logo = "imgs/logo.png"
htmlhelp_basename = 'mapp'
# imgmath: render formulas with dvipng at screen resolution.
imgmath_use_preview = True
imgmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
  (master_doc, 'MAPP', u'MAPP Documentation',
   author, 'MAPP', 'One line description of project.',
   'Miscellaneous'),
]
# -----------------------------------------------------------------------------
# NumPy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
| mit |
jiafeimaowudi/KnowlegeableCNN | structureTestOnMonster2ShareHighLevelOnlyLogistic.py | 1 | 10737 | from theano import tensor as T, printing
import theano
import numpy
from mlp import HiddenLayer
from logistic_sgd_lazy import LogisticRegression
from DocEmbeddingNN import DocEmbeddingNN
# from DocEmbeddingNNPadding import DocEmbeddingNN
from knoweagebleClassifyFlattenedLazy import CorpusReader
import cPickle
import os
import math
import sys
from sklearn.metrics import roc_curve, auc
def work(mode, data_name, test_dataname, pooling_mode="average_exc_pad"):
    """Train or evaluate the multi-dataset document CNN with a shared top layer.

    mode: "train" or "test".
    data_name / test_dataname: colon-separated dataset names; both lists must
        have the same length (one network branch per dataset).
    pooling_mode: pooling used inside each DocEmbeddingNN.

    Each dataset gets its own DocEmbeddingNN + HiddenLayer ("local" params);
    the final LogisticRegression layer is shared across all datasets.
    """
    print "mode: ", mode
    print "data_name: ", data_name
    print "pooling_mode: ", pooling_mode
    print "Started!"
    data_names = data_name.split(":")
    data_count = len(data_names)
    print "Train dataset:"
    for i in xrange(data_count):
        print "%d: %s" % (i, data_names[i])
    print "Test dataset:"
    test_data_names = test_dataname.split(":")
    test_data_count = len(test_data_names)
    for i in xrange(test_data_count):
        print "%d: %s" % (i, test_data_names[i])
    if test_data_count != data_count:
        raise Exception("The amount of test and train dataset must be the same.")
    rng = numpy.random.RandomState(23455)
    # symbolic inputs of the computation graph
    docSentenceCount = T.ivector("docSentenceCount")
    sentenceWordCount = T.ivector("sentenceWordCount")
    corpus = T.matrix("corpus")
    docLabel = T.ivector('docLabel')
    hidden_layer_w = None
    hidden_layer_b = None
    logistic_layer_w = None
    logistic_layer_b = None
    layer0 = list()
    layer1 = list()
    layer2 = list()
    local_params = list()
    # for list-type data
    for i in xrange(data_count):
        layer0.append(DocEmbeddingNN(corpus, docSentenceCount, sentenceWordCount, rng, wordEmbeddingDim=200, \
                                     sentenceLayerNodesNum=50, \
                                     sentenceLayerNodesSize=[5, 200], \
                                     docLayerNodesNum=10, \
                                     docLayerNodesSize=[3, 50],
                                     pooling_mode=pooling_mode))
        layer1.append(HiddenLayer(
            rng,
            input=layer0[i].output,
            n_in=layer0[i].outputDimension,
            n_out=10,
            activation=T.tanh,
            W=hidden_layer_w,
            b=hidden_layer_b
        ))
        # hidden_layer_w = layer1[i].W
        # hidden_layer_b = layer1[i].b
        # the logistic layer's W/b are captured after the first iteration, so
        # every branch shares the same logistic-regression parameters
        layer2.append(LogisticRegression(input=layer1[i].output, n_in=10, n_out=2, W=logistic_layer_w, b=logistic_layer_b))
        logistic_layer_w = layer2[i].W
        logistic_layer_b = layer2[i].b
        local_params.append(layer0[i].params + layer1[i].params)
    share_params = list(layer2[0].params)
    # construct the parameter array.
    params = list(layer2[0].params)
    for i in xrange(data_count):
        # NOTE(review): this appends layer1[0].params (not layer1[i].params)
        # on every iteration, so only the first hidden layer is saved/loaded
        # while the others are duplicated -- confirm whether intentional.
        params += layer1[0].params + layer0[i].params
    # data_name = "car"
    para_path = "data/" + data_name + "/log_model/" + pooling_mode + ".model"
    traintext = ["data/" + data_names[i] + "/train/text" for i in xrange(data_count)]
    trainlabel = ["data/" + data_names[i] + "/train/label" for i in xrange(data_count)]
    testtext = ["data/" + test_data_names[i] + "/test/text" for i in xrange(data_count)]
    testlabel = ["data/" + test_data_names[i] + "/test/label" for i in xrange(data_count)]
    # Load the parameters last time, optionally.
    loadParamsVal(para_path, params)
    if(mode == "train" or mode == "test"):
        train_model = list()
        valid_model = list()
        print "Loading train data."
        batchSize = 10
        share_learning_rate = 0.01
        local_learning_rate = 0.1
        n_batches = list()
        print "Loading test data."
        for i in xrange(data_count):
            cr_train = CorpusReader(minDocSentenceNum=5, minSentenceWordNum=5, dataset=traintext[i], labelset=trainlabel[i])
            docMatrixes, docSentenceNums, sentenceWordNums, ids, labels, _, _ = cr_train.getCorpus([0, 100000])
            docMatrixes = transToTensor(docMatrixes, theano.config.floatX)
            docSentenceNums = transToTensor(docSentenceNums, numpy.int32)
            sentenceWordNums = transToTensor(sentenceWordNums, numpy.int32)
            labels = transToTensor(labels, numpy.int32)
            index = T.lscalar("index")
            n_batches.append((len(docSentenceNums.get_value()) - 1 - 1) / batchSize + 1)
            print "Dataname: %s" % data_names[i]
            print "Train set size is ", len(docMatrixes.get_value())
            print "Batch size is ", batchSize
            print "Number of training batches  is ", n_batches[i]
            error = layer2[i].errors(docLabel)
            cost = layer2[i].negative_log_likelihood(docLabel)
            # shared (logistic) and local (embedding + hidden) parameters are
            # updated with different learning rates
            share_grads = T.grad(cost, share_params)
            share_updates = [
                (param_i, param_i - share_learning_rate * grad_i)
                for param_i, grad_i in zip(share_params, share_grads)
            ]
            grads = T.grad(cost, local_params[i])
            local_updates = [
                (param_i, param_i - local_learning_rate * grad_i)
                for param_i, grad_i in zip(local_params[i], grads)
            ]
            updates = share_updates + local_updates
            print "Compiling train computing graph."
            if mode == "train":
                train_model.append(theano.function(
                    [index],
                    [cost, error, layer2[i].y_pred, docLabel],
                    updates=updates,
                    givens={
                        corpus: docMatrixes,
                        docSentenceCount: docSentenceNums[index * batchSize: (index + 1) * batchSize + 1],
                        sentenceWordCount: sentenceWordNums,
                        docLabel: labels[index * batchSize: (index + 1) * batchSize]
                    }
                ))
            print "Compiled."
            print "Load test dataname: %s" % test_data_names[i]
            cr_test = CorpusReader(minDocSentenceNum=5, minSentenceWordNum=5, dataset=testtext[i], labelset=testlabel[i])
            validDocMatrixes, validDocSentenceNums, validSentenceWordNums, validIds, validLabels, _, _ = cr_test.getCorpus([0, 1000])
            validDocMatrixes = transToTensor(validDocMatrixes, theano.config.floatX)
            validDocSentenceNums = transToTensor(validDocSentenceNums, numpy.int32)
            validSentenceWordNums = transToTensor(validSentenceWordNums, numpy.int32)
            validLabels = transToTensor(validLabels, numpy.int32)
            print "Validating set size is ", len(validDocMatrixes.get_value())
            print "Data loaded."
            print "Compiling test computing graph."
            valid_model.append(theano.function(
                [],
                [cost, error, layer2[i].y_pred, docLabel, T.transpose(layer2[i].p_y_given_x)[1]],
                givens={
                    corpus: validDocMatrixes,
                    docSentenceCount: validDocSentenceNums,
                    sentenceWordCount: validSentenceWordNums,
                    docLabel: validLabels
                }
            ))
            print "Compiled."
            # evaluate the (possibly freshly loaded) model once before training
            costNum, errorNum, pred_label, real_label, pred_prob = valid_model[i]()
            print "Valid current model :", data_names[i]
            print "Cost: ", costNum
            print "Error: ", errorNum
            fpr, tpr, _ = roc_curve(real_label, pred_prob)
            roc_auc = auc(fpr, tpr)
            print "data_name: ", data_name
            print "ROC: ", roc_auc
            fpr, tpr, threshold = roc_curve(real_label, pred_label)
            if 1 in threshold:
                index_of_one = list(threshold).index(1)
                print "TPR: ", tpr[index_of_one]
                print "FPR: ", fpr[index_of_one]
                print "threshold: ", threshold[index_of_one]
        if mode == "test":
            return
        print "Start to train."
        epoch = 0
        n_epochs = 10
        ite = 0
        # ####Validate the model####
        # for dataset_index in xrange(data_count):
        # 	costNum, errorNum, pred_label, real_label, pred_prob = valid_model[dataset_index]()
        # 	print "Valid current model :", data_names[dataset_index]
        # 	print "Cost: ", costNum
        # 	print "Error: ", errorNum
        #
        # 	fpr, tpr, _ = roc_curve(real_label, pred_prob)
        # 	roc_auc = auc(fpr, tpr)
        # 	print "data_name: ", data_name
        # 	print "ROC: ", roc_auc
        # 	fpr, tpr, threshold = roc_curve(real_label, pred_label)
        # 	index_of_one = list(threshold).index(1)
        # 	print "TPR: ", tpr[index_of_one]
        # 	print "FPR: ", fpr[index_of_one]
        # 	print "threshold: ", threshold[index_of_one]
        while (epoch < n_epochs):
            epoch = epoch + 1
            #######################
            # interleave one batch from each dataset per step
            for i in range(max(n_batches)):
                for dataset_index in xrange(data_count):
                    if i >= n_batches[dataset_index]:
                        continue
                    # for list-type data
                    print "dataset_index: %d, i: %d" %(dataset_index, i)
                    costNum, errorNum, pred_label, real_label = train_model[dataset_index](i)
                    ite = ite + 1
                    # for padding data
                    if(ite % 10 == 0):
                        print
                        print "Dataset name: ", data_names[dataset_index]
                        print "@iter: ", ite
                        print "Cost: ", costNum
                        print "Error: ", errorNum
            # Validate the model
            for dataset_index in xrange(data_count):
                costNum, errorNum, pred_label, real_label, pred_prob = valid_model[dataset_index]()
                print "Valid current model :", data_names[dataset_index]
                print "Cost: ", costNum
                print "Error: ", errorNum
                fpr, tpr, _ = roc_curve(real_label, pred_prob)
                roc_auc = auc(fpr, tpr)
                print "data_name: ", data_name
                print "ROC: ", roc_auc
                fpr, tpr, threshold = roc_curve(real_label, pred_label)
                index_of_one = list(threshold).index(1)
                print "TPR: ", tpr[index_of_one]
                print "FPR: ", fpr[index_of_one]
                print "threshold: ", threshold[index_of_one]
            # Save model
            print "Saving parameters."
            saveParamsVal(para_path, params)
            print "Saved."
# elif(mode == "deploy"):
# print "Compiling computing graph."
# output_model = theano.function(
# [corpus, docSentenceCount, sentenceWordCount],
# [layer2.y_pred]
# )
# print "Compiled."
# cr = CorpusReader(minDocSentenceNum=5, minSentenceWordNum=5, dataset="data/train_valid/split")
# count = 21000
# while(count <= 21000):
# docMatrixes, docSentenceNums, sentenceWordNums, ids = cr.getCorpus([count, count + 100])
# docMatrixes = numpy.matrix(
# docMatrixes,
# dtype=theano.config.floatX
# )
# docSentenceNums = numpy.array(
# docSentenceNums,
# dtype=numpy.int32
# )
# sentenceWordNums = numpy.array(
# sentenceWordNums,
# dtype=numpy.int32
# )
# print "start to predict."
# pred_y = output_model(docMatrixes, docSentenceNums, sentenceWordNums)
# print "End predicting."
# print "Writing resfile."
# # print zip(ids, pred_y[0])
# f = file("data/test/res/res" + str(count), "w")
# f.write(str(zip(ids, pred_y[0])))
# f.close()
# print "Written." + str(count)
# count += 100
def saveParamsVal(path, params):
    """Serialize the current value of every theano parameter to *path*.

    Values are pickled back-to-back in list order; loadParamsVal() reads
    them back in the same order.
    """
    with open(path, 'wb') as model_file:
        for param in params:
            cPickle.dump(param.get_value(), model_file, protocol=cPickle.HIGHEST_PROTOCOL)
def loadParamsVal(path, params):
    """Best-effort restore of parameter values previously written by
    saveParamsVal(); silently does nothing when *path* is missing or
    unreadable (e.g. first run, truncated/incompatible model file).
    """
    if not os.path.exists(path):
        return None
    try:
        with open(path, 'rb') as f:
            for para in params:
                para.set_value(cPickle.load(f), borrow=True)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also trapped SystemExit and
        # KeyboardInterrupt.  Loading stays best-effort, but only genuine
        # errors are swallowed now.
        pass
def transToTensor(data, t):
    """Wrap *data* in a borrowed theano shared variable with dtype *t*."""
    values = numpy.array(data, dtype=t)
    return theano.shared(values, borrow=True)
if __name__ == '__main__':
    # CLI: <mode> <data_name> <test_dataname> <pooling_mode>
    work(mode=sys.argv[1], data_name=sys.argv[2], test_dataname=sys.argv[3], pooling_mode=sys.argv[4])
    print "All finished!"
| gpl-2.0 |
workflo/dxf2gcode | python_examples/Kegelstump_Abwicklung_2dxf.py | 1 | 12174 | #!/usr/bin/python
# -*- coding: cp1252 -*-
#
#Kegelstump_Abwicklung_2dxf.py
#Programmer: Christian Kohlöffel
#E-mail: n/A
#
#Copyright 2008 Christian Kohlöffel
#
#Distributed under the terms of the GPL (GNU Public License)
#
#dxf2gcode is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import matplotlib
#matplotlib see: http://matplotlib.sourceforge.net/ and http://www.scipy.org/Cookbook/Matplotlib/
#numpy see: http://numpy.scipy.org/ and http://sourceforge.net/projects/numpy/
matplotlib.use('TkAgg')
from matplotlib.numerix import arange, sin, pi
from matplotlib.axes import Subplot
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from Tkconstants import TOP, BOTH, BOTTOM, LEFT, RIGHT,GROOVE
from Tkinter import Tk, Button, Frame
from math import sqrt, sin, cos, tan, tanh, atan, atan2, radians, degrees, pi, floor, ceil
import sys
class KegelstClass:
    """Truncated cone (Kegelstumpf) whose top/bottom rims may be cut at an
    angle; builds the line geometry of both rim ellipses for plotting."""
    def __init__(self):
        # Geometry defaults: top/bottom diameters, height and rim cut angles.
        self.Dm_oben=10.0
        self.Dm_unten=20.0
        self.Hoehe=10.0
        self.Winkel_oben=radians(45.0)
        self.Winkel_unten=radians(0.0)
        # Rendering detail (segments per 360 degrees)
        self.segments=20
        self.geo=[]
        # Compute the derived standard values
        self.calc_SchraegerSchnitt()
        # Compute points and create the geometries (bottom rim)
        step=10
        points,radius=self.SchrSchn_u.calc_point(0.0)
        for i in range(step,360+step,step):
            pointe,radius=self.SchrSchn_u.calc_point(radians(i))
            self.geo.append(LineGeo(points,pointe))
            points=pointe
        # Compute points and create the geometries (top rim)
        step=10
        points,radius=self.SchrSchn_o.calc_point(0.0)
        for i in range(step,360+step,step):
            pointe,radius=self.SchrSchn_o.calc_point(radians(i))
            self.geo.append(LineGeo(points,pointe))
            points=pointe
##        #Berechnen des Startpunkts:
##        self.phi_kurve_oben=[0]
##        self.kurve_oben=[self.abstand_schnittkante_zu_spitze(\
##            0,self.schnitthoehe_oben,self.schnittwinkel_oben)]
##
##        for i in range(self.segments):
##            phi=radians(360)/self.segments*(i+1)
##            self.phi_kurve_oben.append(phi)
##            self.kurve_oben.append(self.abstand_schnittkante_zu_spitze(\
##                phi,self.schnitthoehe_oben,self.schnittwinkel_oben))
##
##        Pa=PointClass(self.phi_kurve_oben[-2],self.kurve_oben[-2])
##        Pe=PointClass(self.phi_kurve_oben[-1],self.kurve_oben[-1])
##
##        self.geo.append(LineGeo(Pa,Pe))
    def calc_SchraegerSchnitt(self):
        """Compute the cone's half-angle and the two oblique rim sections."""
        # Special case: avoid division by zero for a cylinder
        if self.Dm_oben==self.Dm_unten:
            self.sch_winkel=radians(90.0)
        else:
            self.sch_winkel=atan(self.Hoehe*2/(-self.Dm_oben+self.Dm_unten))
        self.SchrSchn_u=SchraegerSchnitt(dm=self.Dm_unten,a1=self.sch_winkel,a2=self.Winkel_unten)
        self.SchrSchn_o=SchraegerSchnitt(dm=self.Dm_oben,a1=self.sch_winkel,a2=self.Winkel_oben)
    def __str__(self):
        # Human-readable dump of all input and derived values.
        return("DM_oben: %0.1f\n" %self.Dm_oben)+\
              ("Dm_unten: %0.1f\n" %self.Dm_unten)+\
              ("Hoehe: %0.1f\n" %self.Hoehe)+\
              ("Winkel_oben: %0.1f\n" %degrees(self.Winkel_oben))+\
              ("Winkel_unten: %0.1f\n" %degrees(self.Winkel_unten))+\
              ("sch_winkel: %0.1f\n" %degrees(self.sch_winkel))+\
              ("Schnitt unten:\n%s" %self.SchrSchn_u)+\
              ("Schnitt oben:\n%s" %self.SchrSchn_o)
class SchraegerSchnitt:
    """Oblique section through a cone: the resulting ellipse.

    dm: cone diameter at the section, a1: cone half-angle, a2: cut angle.
    """
    def __init__(self,dm=20,a1=0,a2=0):
        self.dm=dm
        self.a1=a1
        self.a2=a2
        # Precompute the derived ellipse parameters
        self.calc_std_parameters()
##        print self
##        print ("vers:  %0.2f \n" %self.vers)+\
##              ("vers_h: %0.2f \n" %self.vers_h)+\
##              ("vers_d: %0.2f \n" %self.vers_d)+\
##              ("dmv:   %0.2f \n" %self.dmv)
    def calc_point(self,phi=0.0,rotation=0.0):#PointClass(0,0),Radius
        """Return (point, radius) on the section ellipse at parameter *phi*."""
        # semi-major axis, semi-minor axis, ellipse rotation (rad),
        # angle of the point on the ellipse (rad)
        Ex = self.De*cos(phi) * cos(rotation) - self.de*sin(phi) * sin(rotation)+self.vers_d*cos(rotation);
        Ey = self.De*cos(phi) * sin(rotation) + self.de*sin(phi) * cos(rotation)+self.vers_d*sin(rotation);
        Radius=sqrt(Ex**2+Ey**2)
        return PointClass(Ex, Ey),Radius
    def calc_std_parameters(self):
        """Derive major/minor axes and centre offset from dm, a1, a2."""
        # Length of the long side (major axis) of the ellipse
        self.De1=self.dm*sin(radians(180)-self.a1)/(2*sin(self.a1-self.a2))
        self.De2=self.dm*sin(self.a1)/(2*sin(radians(180)-self.a1-self.a2))
        self.De=self.De1+self.De2
        # Offset of the ellipse centre from the cone's centre line
        self.vers=(self.De2-(self.De/2))
        self.vers_h=self.vers*sin(self.a2)
        self.vers_d=self.vers*cos(self.a2)
        # Length of the short side (minor axis) of the ellipse
        self.dmv=self.dm-2*self.vers_h/tan(self.a1)
        self.de=2*sqrt((self.dmv/2)**2-(self.vers_d/2)**2)
    def __str__(self):
        return("dm: %0.2f\n" %self.dm)+\
              ("a1: %0.2f\n" %degrees(self.a1))+\
              ("a2: %0.2f\n" %degrees(self.a2))+\
              ("De: %0.2f\n" %self.De)+\
              ("de: %0.2f\n" %self.de)
class ArcGeo:
    """Circular arc defined by start/end points, centre, radius and direction."""
    def __init__(self,Pa=None,Pe=None,O=None,r=1,s_ang=None,e_ang=None,dir=1):
        self.type="ArcGeo"
        self.Pa=Pa
        self.Pe=Pe
        self.O=O
        self.r=abs(r)
        # If not supplied, compute start and end angles from centre/endpoints
        if type(s_ang)==type(None):
            s_ang=O.norm_angle(Pa)
        if type(e_ang)==type(None):
            e_ang=O.norm_angle(Pe)
        # Derive the signed angular extent from the sign of dir
        # NOTE(review): both branches first take the extent modulo -2*pi and
        # then re-normalize with floor/ceil -- presumably to force CCW (dir>0)
        # vs. CW sweeps; confirm against the original dxf2gcode sources.
        self.ext=e_ang-s_ang
        if dir>0.0:
            self.ext=self.ext%(-2*pi)
            self.ext-=floor(self.ext/(2*pi))*(2*pi)
        else:
            self.ext=self.ext%(-2*pi)
            self.ext+=ceil(self.ext/(2*pi))*(2*pi)
        self.s_ang=s_ang
        self.e_ang=e_ang
        self.length=self.r*abs(self.ext)
    def plot2plot(self, plot):
        """Approximate the arc with line segments and draw it on *plot*."""
        x=[]; y=[]
        # Draw one line segment per 6 degrees of arc
        segments=int((abs(degrees(self.ext))//6)+1)
        for i in range(segments+1):
            ang=self.s_ang+i*self.ext/segments
            x.append(self.O.x+cos(ang)*abs(self.r))
            y.append(self.O.y+sin(ang)*abs(self.r))
        plot.plot(x,y,'-g')
        #plot.plot([x[0],x[-1]],[y[0],y[-1]],'cd')
        plot.plot([self.Pa.x,self.Pe.x],[self.Pa.y,self.Pe.y],'cd')
    def __str__(self):
        return ("\nARC")+\
               ("\nPa : %s; s_ang: %0.5f" %(self.Pa,self.s_ang))+\
               ("\nPe : %s; e_ang: %0.5f" %(self.Pe,self.e_ang))+\
               ("\nO  : %s; r: %0.3f" %(self.O,self.r))+\
               ("\next  : %0.5f; length: %0.5f" %(self.ext,self.length))
class LineGeo:
    """Straight line segment between two points Pa and Pe."""
    def __init__(self,Pa,Pe):
        self.type = "LineGeo"
        self.Pa = Pa
        self.Pe = Pe
        self.length = Pa.distance(Pe)
    def get_start_end_points(self,direction):
        """Return (point, tangent angle) for the requested end.

        direction 0 selects the start point, 1 the end point.
        """
        if direction == 0:
            punkt, angle = self.Pa, self.Pe.norm_angle(self.Pa)
        elif direction == 1:
            punkt, angle = self.Pe, self.Pa.norm_angle(self.Pe)
        return punkt, angle
    def plot2plot(self, plot):
        """Draw the segment with diamond end markers on *plot*."""
        plot.plot([self.Pa.x,self.Pe.x],[self.Pa.y,self.Pe.y],'-dm')
    def distance2point(self,point):
        """Perpendicular distance from *point* to the segment's carrier line,
        computed via Heron's formula (twice the triangle area over the base)."""
        base = self.Pa.distance(self.Pe)
        side_a = self.Pa.distance(point)
        side_b = self.Pe.distance(point)
        half_perimeter = (base + side_a + side_b) / 2
        doubled_area = 2 * sqrt(abs(half_perimeter
                                    * (half_perimeter - base)
                                    * (half_perimeter - side_a)
                                    * (half_perimeter - side_b)))
        return abs(doubled_area / base)
    def __str__(self):
        return ("\nLINE")+\
               ("\nPa : %s" %self.Pa)+\
               ("\nPe : %s" %self.Pe)+\
               ("\nlength: %0.5f" %self.length)
class PointClass:
    """2D point with vector-style arithmetic helpers (Python 2 class)."""
    def __init__(self,x=0,y=0):
        self.x=x
        self.y=y
    def __str__(self):
        return ('X ->%6.4f  Y ->%6.4f' %(self.x,self.y))
    def __cmp__(self, other) :
        # BUG FIX: this previously returned the *boolean* equality test,
        # but __cmp__ must return a negative/zero/positive int -- ordering
        # comparisons were therefore wrong.  Compare lexicographically by
        # (x, y); equality semantics ("0 when equal") are preserved.
        return cmp((self.x, self.y), (other.x, other.y))
    def __neg__(self):
        return -1.0*self
    def __add__(self, other): # add to another point
        return PointClass(self.x+other.x, self.y+other.y)
    def __sub__(self, other):
        return self + -other
    def __rmul__(self, other):
        return PointClass(other * self.x,  other * self.y)
    def __mul__(self, other):
        if type(other)==list:
            # Scale the point component-wise by [sx, sy]
            return PointClass(x=self.x*other[0],y=self.y*other[1])
        else:
            # Compute the scalar (dot) product
            return self.x*other.x + self.y*other.y
    def unit_vector(self,Pto=None):
        """Unit direction vector from self towards *Pto*."""
        diffVec=Pto-self
        l=diffVec.distance()
        return PointClass(diffVec.x/l,diffVec.y/l)
    def distance(self,other=None):
        """Euclidean distance to *other* (origin when omitted)."""
        if type(other)==type(None):
            other=PointClass(x=0.0,y=0.0)
        return sqrt(pow(self.x-other.x,2)+pow(self.y-other.y,2))
    def norm_angle(self,other=None):
        """Angle of the vector self -> other (origin when omitted); needs atan2."""
        if type(other)==type(None):
            other=PointClass(x=0.0,y=0.0)
        return atan2(other.y-self.y,other.x-self.x)
    def isintol(self,other,tol):
        """True when both coordinates agree within *tol*."""
        return (abs(self.x-other.x)<=tol) & (abs(self.y-other.y)<tol)
    def transform_to_Norm_Coord(self,other,alpha):
        # NOTE(review): not an orthonormal rotation (both terms use +sin);
        # presumably intentional for the mirrored drawing axis -- confirm.
        xt=other.x+self.x*cos(alpha)+self.y*sin(alpha)
        yt=other.y+self.x*sin(alpha)+self.y*cos(alpha)
        return PointClass(x=xt,y=yt)
    def get_arc_point(self,ang=0,r=1):
        """Point at angle *ang* (degrees) and radius *r* around self."""
        return PointClass(x=self.x+cos(radians(ang))*r,\
                          y=self.y+sin(radians(ang))*r)
    def triangle_height(self,other1,other2):
        """Height of the triangle (self, other1, other2) over side self-other2."""
        # Compute the three side lengths of the triangle
        a=self.distance(other1)
        b=other1.distance(other2)
        c=self.distance(other2)
        return sqrt(pow(b,2)-pow((pow(c,2)+pow(b,2)-pow(a,2))/(2*c),2))
class PlotClass:
    """Embeds a matplotlib figure (with navigation toolbar) in a Tk window."""
    def __init__(self,master=[]):
        self.master=master
        # Create the window with frame and canvas
        self.figure = Figure(figsize=(7,7), dpi=100)
        self.frame_c=Frame(relief = GROOVE,bd = 2)
        self.frame_c.pack(fill=BOTH, expand=1,)
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.frame_c)
        self.canvas.show()
        self.canvas.get_tk_widget().pack(fill=BOTH, expand=1)
        # Create the navigation toolbar at the bottom
        self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.frame_c)
        self.toolbar.update()
        self.canvas._tkcanvas.pack(  fill=BOTH, expand=1)
    def make_erg_plot(self,kegelst):
        """Draw every geometry element of *kegelst* into the figure."""
        self.plot1 = self.figure.add_subplot(111)
        self.plot1.set_title("Kegelstumpf Abwicklung")
        self.plot1.hold(True)
        for geo in kegelst.geo:
            geo.plot2plot(self.plot1)
# Module entry point: build the Tk main window, create the frustum model
# and hand it to the plot window.  NOTE(review): guarded by "if 1:"
# instead of "if __name__ == '__main__':", so this also runs on import -
# confirm whether that is intended before changing it.
if 1:
    master = Tk()
    kegelst=KegelstClass()
    master.title("Kegelstumpfabwicklung 2 DXF")
    Pl=PlotClass(master)
    Pl.make_erg_plot(kegelst)
    # Enter the Tk event loop; blocks until the window is closed.
    master.mainloop()
| gpl-3.0 |
kntem/webdeposit | modules/bibauthorid/lib/bibauthorid_tortoise.py | 3 | 15730 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio import bibauthorid_config as bconfig
from datetime import datetime
import os
#import cPickle as SER
import msgpack as SER
import gc
import matplotlib.pyplot as plt
import numpy as np
#This is supposed to defeat a bit of the python vm performance losses:
import sys
sys.setcheckinterval(1000000)
from collections import defaultdict
from itertools import groupby, chain, repeat
from invenio.bibauthorid_general_utils import update_status, update_status_final, override_stdout_config
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_marktables
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_personid
from invenio.bibauthorid_wedge import wedge
from invenio.bibauthorid_name_utils import generate_last_name_cluster_str
from invenio.bibauthorid_backinterface import empty_results_table
from invenio.bibauthorid_backinterface import remove_result_cluster
from invenio.bibauthorid_general_utils import bibauthor_print
from invenio.bibauthorid_prob_matrix import prepare_matirx
from invenio.bibauthorid_scheduler import schedule, matrix_coefs
from invenio.bibauthorid_least_squares import to_function as create_approx_func
from math import isnan
import multiprocessing as mp
#python2.4 compatibility
from invenio.bibauthorid_general_utils import bai_all as all
'''
There are three main entry points to tortoise
i) tortoise
Performs disambiguation iteration.
    The argument pure indicates whether to use
the claims and the rejections or not.
Use pure=True only to test the accuracy of tortoise.
ii) tortoise_from_scratch
NOT RECOMMENDED!
Use this function only if you have just
installed invenio and this is your first
disambiguation or if personid is broken.
iii) tortoise_last_name
Computes the clusters for only one last name
    group. It is primarily used for testing. It
may also be used to fix a broken last name
cluster. It does not involve multiprocessing
    so it is convenient to debug with pdb.
'''
# Exit codes:
# The standard ones are not well documented
# so we are using random numbers.
def tortoise_from_scratch():
    """Full disambiguation run starting from the mark tables.

    First builds every probability matrix (forced), then wipes the
    results table and runs the wedge clustering over freshly loaded
    cluster sets.
    """
    bibauthor_print("Preparing cluster sets.")
    cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()

    bibauthor_print("Building all matrices.")
    statuses = schedule_create_matrix(cluster_sets, sizes, force=True)
    assert len(statuses) == len(cluster_sets)
    assert all(code == os.EX_OK for code in statuses)

    empty_results_table()

    # Reload the cluster sets for the second pass (presumably the delayed
    # sets are consumed by the first pass).
    bibauthor_print("Preparing cluster sets.")
    cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()

    bibauthor_print("Starting disambiguation.")
    statuses = schedule_wedge_and_store(cluster_sets, sizes)
    assert len(statuses) == len(cluster_sets)
    assert all(code == os.EX_OK for code in statuses)
def tortoise(pure=False,
             force_matrix_creation=False,
             skip_matrix_creation=False,
             last_run=None):
    """Perform one disambiguation iteration over personid.

    :param pure: ignore claims and rejections (testing only).
    :param force_matrix_creation: rebuild all probability matrices.
    :param skip_matrix_creation: skip the matrix phase entirely
        (mutually exclusive with forcing).
    :param last_run: passed through to the cluster-set loader.
    """
    assert not force_matrix_creation or not skip_matrix_creation

    # Pure runs must always recompute the matrices.
    force_matrix_creation = force_matrix_creation or pure

    if not skip_matrix_creation:
        bibauthor_print("Preparing cluster sets.")
        clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
        bibauthor_print("Building all matrices.")
        statuses = schedule_create_matrix(clusters, sizes,
                                          force=force_matrix_creation)
        assert len(statuses) == len(clusters)
        assert all(code == os.EX_OK for code in statuses)

    # Fresh cluster sets for the clustering phase.
    bibauthor_print("Preparing cluster sets.")
    clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
    bibauthor_print("Starting disambiguation.")
    statuses = schedule_wedge_and_store(clusters, sizes)
    assert len(statuses) == len(clusters)
    assert all(code == os.EX_OK for code in statuses)
def tortoise_last_name(name, from_mark=False, pure=False):
    """Run the whole pipeline for a single last-name cluster.

    Single-process, so convenient for debugging with pdb.
    :param from_mark: load from the mark tables instead of personid
        (mutually exclusive with *pure*).
    :param pure: ignore claims/rejections when loading from personid.
    """
    bibauthor_print('Start working on %s' % name)
    assert not(from_mark and pure)
    lname = generate_last_name_cluster_str(name)
    if from_mark:
        bibauthor_print(' ... from mark!')
        clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
        bibauthor_print(' ... delayed done')
    else:
        bibauthor_print(' ... from pid, pure')
        clusters, lnames, sizes = delayed_cluster_sets_from_personid(pure)
        bibauthor_print(' ... delayed pure done!')
#    try:
    # Raises ValueError when lname is absent; the commented-out try/except
    # used to turn that into a friendly message.
    idx = lnames.index(lname)
    cluster = clusters[idx]
    size = sizes[idx]
    cluster_set = cluster()
    bibauthor_print("Found, %s(%s). Total number of bibs: %d." % (name, lname, size))
    create_matrix(cluster_set, True)
    wedge_and_store(cluster_set)
#    except IndexError:
#        bibauthor_print("Sorry, %s(%s) not found in the last name clusters" % (name, lname))
def _collect_statistics_lname_coeff(params):
    """Worker: run the wedge once for a (last-name, coefficient) pair and
    discard the produced result cluster (statistics-only run)."""
    lname, coeff = params

    clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
    idx = lnames.index(lname)
    bibauthor_print("Found, %s. Total number of bibs: %d." % (lname, sizes[idx]))

    cluster_set = clusters[idx]()
    create_matrix(cluster_set, False)

    bibs = cluster_set.num_all_bibs
    pairs = bibs * (bibs - 1) / 2
    bibauthor_print("Start working on %s. Total number of bibs: %d, "
                    "maximum number of comparisons: %d"
                    % (cluster_set.last_name, bibs, pairs))

    wedge(cluster_set, True, coeff)
    remove_result_cluster(cluster_set.last_name)
def _create_matrix(lname):
    """Worker: build and store the probability matrix for one last name."""
    clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
    idx = lnames.index(lname)
    bibauthor_print("Found, %s. Total number of bibs: %d." % (lname, sizes[idx]))

    cluster_set = clusters[idx]()
    create_matrix(cluster_set, True)

    bibs = cluster_set.num_all_bibs
    pairs = bibs * (bibs - 1) / 2
    bibauthor_print("Start working on %s. Total number of bibs: %d, "
                    "maximum number of comparisons: %d"
                    % (cluster_set.last_name, bibs, pairs))
    cluster_set.store()
def tortoise_tweak_coefficient(lastnames, min_coef, max_coef, stepping, create_matrix=True):
    """Collect wedge statistics for every (last name, coefficient) pair in
    [min_coef, max_coef) with the given stepping.

    Fix: the multiprocessing pool is now closed and joined, so worker
    processes no longer outlive the call (the original leaked the pool).
    NOTE: the *create_matrix* flag shadows the module-level
    create_matrix() function inside this scope; name kept for API
    compatibility.
    """
    bibauthor_print('Coefficient tweaking!')
    bibauthor_print('Cluster sets from mark...')

    lnames = set(generate_last_name_cluster_str(n) for n in lastnames)
    coefficients = [x / 100. for x in range(int(min_coef * 100),
                                            int(max_coef * 100),
                                            int(stepping * 100))]

    pool = mp.Pool()
    try:
        if create_matrix:
            pool.map(_create_matrix, lnames)
        pool.map(_collect_statistics_lname_coeff,
                 ((lname, coeff) for lname in lnames for coeff in coefficients))
    finally:
        # Release the worker processes deterministically.
        pool.close()
        pool.join()
def _gen_plot(data, filename):
    """Plot the aggregated statistics in *data* and save them to *filename*.

    *data* maps a key (coefficient) to
    [count, avg, min, max, cluster_count, normalized_cluster_count]
    (see tortoise_coefficient_statistics).

    Fixes: the two bare ``except:`` clauses now catch only ValueError
    (raised by max() on an empty sequence), and both normalisation
    divisors are guarded so an all-zero column can no longer raise
    ZeroDivisionError.
    """
    plt.clf()
    ax = plt.subplot(111)
    ax.grid(visible=True)

    x = sorted(data.keys())

    w = [data[k][0] for k in x]
    try:
        wscf = max(w)
    except ValueError:
        wscf = 0
    # Guard: an all-zero column would otherwise divide by zero.
    w_div = wscf if wscf else 1
    w = [float(i)/w_div for i in w]

    y = [data[k][1] for k in x]
    maxi = [data[k][3] for k in x]
    mini = [data[k][2] for k in x]

    lengs = [data[k][4] for k in x]
    try:
        ml = float(max(lengs))
    except ValueError:
        ml = 1
    if not ml:
        ml = 1
    lengs = [k/ml for k in lengs]

    normalengs = [data[k][5] for k in x]

    ax.plot(x,y,'-o',label='avg')
    ax.plot(x,maxi,'-o', label='max')
    ax.plot(x,mini,'-o', label='min')
    ax.plot(x,w, '-x', label='norm %s' % str(wscf))
    ax.plot(x,lengs,'-o',label='acl %s' % str(int(ml)))
    ax.plot(x,normalengs, '-o', label='ncl')
    plt.ylim(ymax = 1., ymin = -0.01)
    plt.xlim(xmax = 1., xmin = -0.01)
    ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=6, mode="expand", borderaxespad=0.)
    plt.savefig(filename)
def tortoise_coefficient_statistics(pickle_output=None, generate_graphs=True):
    """Aggregate the per-run wedge reports under /tmp/baistats/ into
    per-coefficient and per-cluster statistics.

    :param pickle_output: when set, dump the aggregated dictionaries to
        this path (serialised with the module-level SER, i.e. msgpack).
    :param generate_graphs: also render graphs under /tmp/graphs/ via
        _gen_plot.
    """
    override_stdout_config(stdout=True)

    files = ['/tmp/baistats/'+x for x in os.listdir('/tmp/baistats/') if x.startswith('cluster_status_report_pid')]
    fnum = float(len(files))
    quanta = .1/fnum

    total_stats = 0
    used_coeffs = set()
    used_clusters = set()

    #av_counter, avg, min, max, nclus, normalized_avg
    cluster_stats = defaultdict(lambda : defaultdict(lambda : [0.,0.,0.,0.,0.,0.]))
    coeff_stats = defaultdict(lambda : [0.,0.,0.,0.,0.,0.])

    def gen_graphs(only_synthetic=False):
        # Synthetic overview graph over all coefficients, then (unless
        # only_synthetic) one graph per last-name cluster.
        update_status(0, 'Generating coefficients graph...')
        _gen_plot(coeff_stats, '/tmp/graphs/AAAAA-coefficients.svg')
        if not only_synthetic:
            cn = cluster_stats.keys()
            l = float(len(cn))
            for i,c in enumerate(cn):
                update_status(i/l, 'Generating name graphs... %s' % str(c))
                _gen_plot(cluster_stats[c], '/tmp/graphs/CS-%s.png' % str(c))

    for i,fi in enumerate(files):
        if generate_graphs:
            # Refresh the overview graph every 1000 files.  NOTE(review):
            # this also fires at i == 0, before any data is loaded -
            # harmless but presumably unintended.
            if i%1000 ==0:
                gen_graphs(True)

        f = open(fi,'r')
        status = i/fnum
        update_status(status, 'Loading '+ fi[fi.find('lastname')+9:])
        contents = SER.load(f)
        f.close()

        # Report layout: [coefficient, cluster name, rows, max length].
        cur_coef = contents[0]
        cur_clust = contents[1]

        cur_maxlen = float(contents[3])

        if cur_coef:
            total_stats += 1
            used_coeffs.add(cur_coef)
            used_clusters.add(cur_clust)

            update_status(status+0.2*quanta, '  Computing averages...')

            cur_clen = len(contents[2])
            cur_coeffs = [x[2] for x in contents[2]]
            cur_clustnumber = float(len(set([x[0] for x in contents[2]])))

            assert cur_clustnumber > 0 and cur_clustnumber < cur_maxlen, "Error, found log with strange clustnumber! %s %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_maxlen),
                                                                          str(cur_clustnumber))

            if cur_coeffs:

                assert len(cur_coeffs) == cur_clen and cur_coeffs, "Error, there is a cluster witohut stuff? %s %s %s"% (str(cur_clust), str(cur_coef), str(cur_coeffs))
                assert all([x >= 0 and x <= 1 for x in cur_coeffs]), "Error, a coefficient is wrong here! Check me! %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_coeffs))

                cur_min = min(cur_coeffs)
                cur_max = max(cur_coeffs)
                cur_avg = sum(cur_coeffs)/cur_clen

                update_status(status+0.4*quanta, '  comulative per coeff...')
                avi = coeff_stats[cur_coef][0]
                #number of points
                coeff_stats[cur_coef][0] = avi+1
                #average of coefficients
                coeff_stats[cur_coef][1] = (coeff_stats[cur_coef][1]*avi + cur_avg)/(avi+1)
                #min coeff
                # NOTE(review): entries start at 0. and coefficients are in
                # [0, 1], so this minimum can never rise above 0 - verify
                # whether an initial value of +inf was intended.
                coeff_stats[cur_coef][2] = min(coeff_stats[cur_coef][2], cur_min)
                #max coeff
                coeff_stats[cur_coef][3] = max(coeff_stats[cur_coef][3], cur_max)
                #avg number of clusters
                coeff_stats[cur_coef][4] = (coeff_stats[cur_coef][4]*avi + cur_clustnumber)/(avi+1)
                #normalized avg number of clusters
                coeff_stats[cur_coef][5] = (coeff_stats[cur_coef][5]*avi + cur_clustnumber/cur_maxlen)/(avi+1)

                update_status(status+0.6*quanta, '  comulative per cluster per coeff...')
                avi = cluster_stats[cur_clust][cur_coef][0]
                cluster_stats[cur_clust][cur_coef][0] = avi+1
                cluster_stats[cur_clust][cur_coef][1] = (cluster_stats[cur_clust][cur_coef][1]*avi + cur_avg)/(avi+1)
                cluster_stats[cur_clust][cur_coef][2] = min(cluster_stats[cur_clust][cur_coef][2], cur_min)
                cluster_stats[cur_clust][cur_coef][3] = max(cluster_stats[cur_clust][cur_coef][3], cur_max)
                cluster_stats[cur_clust][cur_coef][4] = (cluster_stats[cur_clust][cur_coef][4]*avi + cur_clustnumber)/(avi+1)
                cluster_stats[cur_clust][cur_coef][5] = (cluster_stats[cur_clust][cur_coef][5]*avi + cur_clustnumber/cur_maxlen)/(avi+1)

    update_status_final('Done!')

    if generate_graphs:
        gen_graphs()

    if pickle_output:
        update_status(0,'Dumping to file...')
        f = open(pickle_output,'w')
        # Python-2-only .iterkeys(); the defaultdicts are converted to
        # plain dicts so they can be serialised.
        SER.dump({'cluster_stats':dict((x,dict(cluster_stats[x])) for x in cluster_stats.iterkeys()), 'coeff_stats':dict((coeff_stats))}, f)
        f.close()
def create_matrix(cluster_set, force):
    """Announce and build the probability matrix for *cluster_set*."""
    bibs = cluster_set.num_all_bibs
    pairs = bibs * (bibs - 1) / 2
    bibauthor_print("Start building matrix for %s. Total number of bibs: %d, "
                    "maximum number of comparisons: %d"
                    % (cluster_set.last_name, bibs, pairs))
    # prepare_matirx [sic] is the name exported by bibauthorid_prob_matrix.
    return prepare_matirx(cluster_set, force)
def force_create_matrix(cluster_set, force):
    """Materialise the delayed *cluster_set*, then build its matrix."""
    bibauthor_print("Building a cluster set.")
    materialised = cluster_set()
    return create_matrix(materialised, force)
def wedge_and_store(cluster_set):
    """Run the wedge on *cluster_set*, drop any previous result for the
    same last name, and store the new clusters.  Always returns True."""
    bibs = cluster_set.num_all_bibs
    pairs = bibs * (bibs - 1) / 2
    bibauthor_print("Start working on %s. Total number of bibs: %d, "
                    "maximum number of comparisons: %d"
                    % (cluster_set.last_name, bibs, pairs))
    wedge(cluster_set)
    remove_result_cluster(cluster_set.last_name)
    cluster_set.store()
    return True
def force_wedge_and_store(cluster_set):
    """Materialise the delayed *cluster_set*, then wedge and store it."""
    bibauthor_print("Building a cluster set.")
    materialised = cluster_set()
    return wedge_and_store(materialised)
def _schedule_jobs(cluster_sets, sizes, create_job, memfile_prefix):
    """Shared scheduling logic for the matrix and wedge phases.

    Wraps every cluster set in a job via *create_job* and hands the batch
    to the scheduler; when peak-memory debugging is enabled a timestamped
    log path prefixed with *memfile_prefix* is supplied.
    """
    memfile_path = None
    if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
        tt = datetime.now()
        tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
        memfile_path = ('%s%s_memory_%d:%d_%d-%d-%d.log' %
                        ((bconfig.TORTOISE_FILES_PATH, memfile_prefix) + tt))
    return schedule(map(create_job, cluster_sets),
                    sizes,
                    create_approx_func(matrix_coefs),
                    memfile_path)


def schedule_create_matrix(cluster_sets, sizes, force):
    """Schedule matrix creation for every cluster set.

    (Refactored: the scheduling boilerplate duplicated with
    schedule_wedge_and_store now lives in _schedule_jobs.)
    """
    def create_job(cluster):
        def ret():
            return force_create_matrix(cluster, force)
        return ret
    return _schedule_jobs(cluster_sets, sizes, create_job, 'matrix')


def schedule_wedge_and_store(cluster_sets, sizes):
    """Schedule the wedge-and-store phase for every cluster set."""
    def create_job(cluster):
        def ret():
            return force_wedge_and_store(cluster)
        return ret
    return _schedule_jobs(cluster_sets, sizes, create_job, 'wedge')
| gpl-2.0 |
ngvozdiev/ctr-base | python/plot_link_paths.py | 1 | 3124 | from collections import defaultdict
from scipy import interpolate
import numpy as np
import matplotlib.pylab as plt
import parser_wrapper
import glob
import itertools
import matplotlib.patches as mpatches
import argparse
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
# Command-line interface and metric loading.
parser = argparse.ArgumentParser(description='Plots link occupancy')
parser.add_argument('--file', type=str, help='Metric file')
parser.add_argument('--sofile', type=str, help='Library file for parser', default='libmetrics_parser.dylib')
parser.add_argument('--metric', type=str, help='Metric id', default='path_bytes')
parser.add_argument('--x_min', type=float, default=0)
parser.add_argument('--x_max', type=float, default=2000)
args = parser.parse_args()

# Links that each get their own stacked subplot below.
INTERESTING_LINKS = ['N0->N1', 'N4->N5', 'N8->N9', 'N12->N13']

p = parser_wrapper.MetricsParser(args.file, args.sofile)
data = p.Parse(args.metric, '.*', deltas=True)
print data  # Python 2 print statement; this script is py2-only
ax_f, axarr = plt.subplots(len(INTERESTING_LINKS), sharex=True, sharey=True)
def SrcDstLabel(src, dst):
    """Build a "src(arrow)dst" label, stripping the 'N' node prefixes."""
    label = u'%s\u2192%s' % (src, dst)
    return label.replace('N', '')
def AggFromPath(path):
    """Label for the aggregate of *path*, e.g. '...[N0->N2->N3]...' maps
    to the label for (N0, N3)."""
    hops = path.split('[')[1].split(']')[0].split('->')
    return SrcDstLabel(hops[0], hops[-1])
# Cycle through NUM_COLORS samples of the 'hot' colormap; color_map caches
# the colour assigned to each aggregate label.
cm = plt.get_cmap('hot')
NUM_COLORS=5
colors = itertools.cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
color_map = {}
def GetColor(label):
    """Return the colour cached for *label*, drawing a new one from the
    global colour cycle on first use.

    Fix: ``next(colors)`` (the builtin, Python 2.6+ and 3) replaces the
    Python-2-only ``colors.next()`` method call.
    """
    if label in color_map:
        return color_map[label]
    return color_map.setdefault(label, next(colors))
# Pre-assign colours to the four aggregates so their colours are stable
# across subplots regardless of plotting order.
GetColor(SrcDstLabel(0, 1))
GetColor(SrcDstLabel(2, 3))
GetColor(SrcDstLabel(6, 7))
GetColor(SrcDstLabel(10, 11))
# One stacked subplot per interesting link: every path crossing the link
# contributes a band sized by its binned, interpolated throughput.
for i, link in enumerate(INTERESTING_LINKS):
    ax = axarr[i]
    xs = []
    fs = []
    labels = []
    for key, value in data.items():
        assert(key[0] == args.metric)
        path = key[1]
        if link in path:
            x, y = value
            # Timestamps: picoseconds -> seconds.
            x = np.array(x, dtype=np.float64) * 0.000000000001
            # Bytes -> Gbps; the 100.0 factor is presumably the sampling
            # rate multiplier - TODO confirm units against the collector.
            y = np.array(y, dtype=np.float64) * (100.0 / 1000.0 / 1000.0 / 1000.0) * 8
            x, y = parser_wrapper.Bin(x, y, 100)
            xs.append(x)
            fs.append(interpolate.interp1d(x,y, bounds_error=False, fill_value=0))
            labels.append(AggFromPath(path))
    if len(xs) == 0:
        continue
    # Use the longest x series as the common sample grid for all paths.
    max_x = max(len(i) for i in xs)
    x = None
    for xi in xs:
        if len(xi) == max_x:
            x = xi
    ys = [f(x) for f in fs]
    colors_list = [GetColor(i) for i in labels]
    ngons = ax.stackplot(x, ys, labels=labels, colors=colors_list)
    # ngons[0].set_hatch('//')
    ax.set_ylabel('Gbps')
    ax.legend(loc=1, prop={'size': 10})
    #color_items = color_map.items()
    #ax.legend([plt.Rectangle((0, 0), 1, 1, fc=v) for _, v in color_items],
    #          [k for k, _ in color_items], ncol=2, loc=2)

# Shared-axis cosmetics and output.
ax_f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in ax_f.axes[:-1]], visible=False)
plt.xlim([args.x_min, args.x_max])
plt.ylim([0,0.999])
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 300))
plt.xlabel('seconds')
plt.savefig('link_paths_out.pdf', bbox_inches='tight')
plt.show()
| mit |
vybstat/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to targets
# Every 5th target gets uniform noise in [-1.5, 1.5).  The RNG is
# unseeded, so each run produces a slightly different figure.
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# plt.hold was deprecated in matplotlib 2.0 and removed in 3.0;
# overplotting is the default behaviour, so the former plt.hold('on')
# call is dropped.
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
JT5D/scikit-learn | examples/ensemble/plot_forest_importances.py | 7 | 1742 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)

import numpy as np

from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier

# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
                           n_features=10,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)

# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)

forest.fit(X, y)
importances = forest.feature_importances_
# Inter-tree variability: std of each feature's importance across trees.
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
# Feature indices sorted by decreasing importance.
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")

for f in range(10):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest.
# pylab is deprecated; matplotlib.pyplot is the supported API and exposes
# the same plotting functions used here (alias kept as `pl`).
import matplotlib.pyplot as pl
pl.figure()
pl.title("Feature importances")
pl.bar(range(10), importances[indices],
       color="r", yerr=std[indices], align="center")
pl.xticks(range(10), indices)
pl.xlim([-1, 10])
pl.show()
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/lbann/package.py | 1 | 11820 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Lbann(CMakePackage, CudaPackage):
    """LBANN: Livermore Big Artificial Neural Network Toolkit. A distributed
    memory, HPC-optimized, model and data parallel training toolkit for deep
    neural networks."""

    homepage = "http://software.llnl.gov/lbann/"
    url = "https://github.com/LLNL/lbann/archive/v0.91.tar.gz"
    git = "https://github.com/LLNL/lbann.git"

    maintainers = ['bvanessen']

    version('develop', branch='develop')
    version('0.100', sha256='d1bab4fb6f1b80ae83a7286cc536a32830890f6e5b0c3107a17c2600d0796912')
    version('0.99', sha256='3358d44f1bc894321ce07d733afdf6cb7de39c33e3852d73c9f31f530175b7cd')
    version('0.98.1', sha256='9a2da8f41cd8bf17d1845edf9de6d60f781204ebd37bffba96d8872036c10c66')
    version('0.98', sha256='8d64b9ac0f1d60db553efa4e657f5ea87e790afe65336117267e9c7ae6f68239')
    version('0.97.1', sha256='2f2756126ac8bb993202cf532d72c4d4044e877f4d52de9fdf70d0babd500ce4')
    version('0.97', sha256='9794a706fc7ac151926231efdf74564c39fbaa99edca4acb745ee7d20c32dae7')
    version('0.96', sha256='97af78e9d3c405e963361d0db96ee5425ee0766fa52b43c75b8a5670d48e4b4a')
    version('0.95', sha256='d310b986948b5ee2bedec36383a7fe79403721c8dc2663a280676b4e431f83c2')
    version('0.94', sha256='567e99b488ebe6294933c98a212281bffd5220fc13a0a5cd8441f9a3761ceccf')
    version('0.93', sha256='77bfd7fe52ee7495050f49bcdd0e353ba1730e3ad15042c678faa5eeed55fb8c')
    version('0.92', sha256='9187c5bcbc562c2828fe619d53884ab80afb1bcd627a817edb935b80affe7b84')
    version('0.91', sha256='b69f470829f434f266119a33695592f74802cff4b76b37022db00ab32de322f5')

    variant('nccl', default=False, description='Builds with support for NCCL communication lib')
    variant('opencv', default=True, description='Builds with support for image processing routines with OpenCV')
    variant('seq_init', default=False, description='Force serial initialization of weight matrices.')
    variant('dtype', default='float',
            description='Type for floating point representation of weights',
            values=('float', 'double'))
    variant('build_type', default='Release',
            description='The build type to build',
            values=('Debug', 'Release'))
    variant('al', default=True, description='Builds with support for Aluminum Library')
    variant('conduit', default=True,
            description='Builds with support for Conduit Library '
            '(note that for v0.99 conduit is required)')
    variant('vtune', default=False, description='Builds with support for Intel VTune')
    variant('docs', default=False, description='Builds with support for building documentation')
    variant('extras', default=False, description='Add python modules for LBANN related tools')

    conflicts('@:0.90,0.99:', when='~conduit')

    depends_on('cmake@3.16.0:', type='build')

    # It seems that there is a need for one statement per version bounds
    # NOTE(review): several when= clauses below reference '+gpu', but the
    # GPU variant provided by CudaPackage is named 'cuda' - confirm those
    # conditions can ever be satisfied.
    depends_on('hydrogen +openmp_blas +shared +int64', when='@:0.90,0.95: ~al')
    depends_on('hydrogen +openmp_blas +shared +int64 +al', when='@:0.90,0.95: +al')
    depends_on('hydrogen +openmp_blas +shared +int64 build_type=Debug',
               when='build_type=Debug @:0.90,0.95: ~al')
    depends_on('hydrogen +openmp_blas +shared +int64 build_type=Debug +al',
               when='build_type=Debug @:0.90,0.95: +al')
    depends_on('hydrogen +openmp_blas +shared +int64 +cuda',
               when='+gpu @:0.90,0.95: ~al')
    depends_on('hydrogen +openmp_blas +shared +int64 +cuda +al',
               when='+gpu @:0.90,0.95: +al')
    depends_on('hydrogen +openmp_blas +shared +int64 +cuda build_type=Debug',
               when='build_type=Debug @:0.90,0.95: +gpu')
    depends_on('hydrogen +openmp_blas +shared +int64 +cuda build_type=Debug +al',
               when='build_type=Debug @:0.90,0.95: +gpu +al')

    # Older versions depended on Elemental not Hydrogen
    depends_on('elemental +openmp_blas +shared +int64', when='@0.91:0.94')
    depends_on('elemental +openmp_blas +shared +int64 build_type=Debug',
               when='build_type=Debug @0.91:0.94')

    depends_on('aluminum', when='@:0.90,0.95: +al ~gpu')
    depends_on('aluminum +cuda +ht', when='@:0.90,0.95: +al +cuda ~nccl')
    depends_on('aluminum +cuda +nccl +ht', when='@:0.90,0.95: +al +cuda +nccl')

    depends_on('cudnn', when='+cuda')
    depends_on('cub', when='@0.94:0.98.2 +cuda')
    depends_on('mpi')
    depends_on('hwloc')

    # LBANN wraps OpenCV calls in OpenMP parallel loops, build without OpenMP
    # Additionally disable video related options, they incorrectly link in a
    # bad OpenMP library when building with clang or Intel compilers
    # Note that for Power systems we want the environment to add +powerpc +vsx
    depends_on('opencv@3.2.0: +core +highgui +imgproc +jpeg +png +tiff +zlib '
               '+fast-math ~calib3d ~cuda ~dnn ~eigen'
               '~features2d ~flann ~gtk ~ipp ~ipp_iw ~jasper ~java ~lapack ~ml'
               '~openmp ~opencl ~opencl_svm ~openclamdblas ~openclamdfft'
               '~pthreads_pf ~python ~qt ~stitching ~superres ~ts ~video'
               '~videostab ~videoio ~vtk', when='+opencv')

    depends_on('cnpy')
    depends_on('nccl', when='@0.94:0.98.2 +cuda +nccl')

    depends_on('conduit@0.4.0: +hdf5', when='@0.94:0.99 +conduit')
    depends_on('conduit@0.4.0: +hdf5', when='@:0.90,0.99:')

    # Python tooling (the 'extras' variant pulls in the analysis modules).
    depends_on('python@3: +shared', type=('build', 'run'), when='@:0.90,0.99:')
    extends("python")
    depends_on('py-setuptools', type='build')
    depends_on('py-argparse', type='run', when='@:0.90,0.99: ^python@:2.6')
    depends_on('py-configparser', type='run', when='@:0.90,0.99: +extras')
    depends_on('py-graphviz@0.10.1:', type='run', when='@:0.90,0.99: +extras')
    depends_on('py-matplotlib@3.0.0:', type='run', when='@:0.90,0.99: +extras')
    depends_on('py-numpy@1.16.0:', type=('build', 'run'), when='@:0.90,0.99: +extras')
    depends_on('py-onnx@1.3.0:', type='run', when='@:0.90,0.99: +extras')
    depends_on('py-pandas@0.24.1:', type='run', when='@:0.90,0.99: +extras')
    depends_on('py-texttable@1.4.0:', type='run', when='@:0.90,0.99: +extras')
    depends_on('py-pytest', type='test', when='@:0.90,0.99:')
    depends_on('py-protobuf+cpp@3.6.1:', type=('build', 'run'), when='@:0.90,0.99:')

    depends_on('py-breathe', type='build', when='+docs')
    depends_on('doxygen', type='build', when='+docs')
    depends_on('py-m2r', type='build', when='+docs')

    depends_on('cereal')
    depends_on('catch2', type='test')
    depends_on('clara')

    generator = 'Ninja'
    depends_on('ninja', type='build')
@property
def common_config_args(self):
spec = self.spec
# Environment variables
cppflags = []
cppflags.append('-DLBANN_SET_EL_RNG -ldl')
return [
'-DCMAKE_CXX_FLAGS=%s' % ' '.join(cppflags),
'-DLBANN_VERSION=spack',
'-DCNPY_DIR={0}'.format(spec['cnpy'].prefix),
]
    # Get any recent versions or non-numeric version
    # Note that develop > numeric and non-develop < numeric
    @when('@:0.90,0.94:')
    def cmake_args(self):
        """CMake arguments for recent LBANN releases (0.94+ and develop)."""
        spec = self.spec
        args = self.common_config_args
        # Feature toggles derived from the active variants.
        args.extend([
            '-DLBANN_WITH_TOPO_AWARE:BOOL=%s' % ('+cuda +nccl' in spec),
            '-DLBANN_WITH_ALUMINUM:BOOL=%s' % ('+al' in spec),
            '-DLBANN_WITH_CONDUIT:BOOL=%s' % ('+conduit' in spec),
            '-DLBANN_WITH_CUDA:BOOL=%s' % ('+cuda' in spec),
            '-DLBANN_WITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
            '-DLBANN_WITH_SOFTMAX_CUDA:BOOL=%s' % ('+cuda' in spec),
            '-DLBANN_SEQUENTIAL_INITIALIZATION:BOOL=%s' %
            ('+seq_init' in spec),
            '-DLBANN_WITH_TBINF=OFF',
            '-DLBANN_WITH_VTUNE:BOOL=%s' % ('+vtune' in spec),
            '-DLBANN_DATATYPE={0}'.format(spec.variants['dtype'].value),
            '-DLBANN_VERBOSE=0',
            '-DCEREAL_DIR={0}'.format(spec['cereal'].prefix),
            # protobuf is included by py-protobuf+cpp
            '-DProtobuf_DIR={0}'.format(spec['protobuf'].prefix)])
        # Linear-algebra backend: Hydrogen for new versions and develop,
        # Elemental only for the 0.94 release line.
        if spec.satisfies('@:0.90') or spec.satisfies('@0.95:'):
            args.extend([
                '-DHydrogen_DIR={0}/CMake/hydrogen'.format(
                    spec['hydrogen'].prefix)])
        elif spec.satisfies('@0.94'):
            args.extend([
                '-DElemental_DIR={0}/CMake/elemental'.format(
                    spec['elemental'].prefix)])
        if spec.satisfies('@0.94:0.98.2'):
            args.extend(['-DLBANN_WITH_NCCL:BOOL=%s' %
                         ('+cuda +nccl' in spec)])
        if '+vtune' in spec:
            args.extend(['-DVTUNE_DIR={0}'.format(spec['vtune'].prefix)])
        if '+al' in spec:
            args.extend(['-DAluminum_DIR={0}'.format(spec['aluminum'].prefix)])
        if '+conduit' in spec:
            args.extend([
                '-DLBANN_CONDUIT_DIR={0}'.format(spec['conduit'].prefix),
                '-DConduit_DIR={0}'.format(spec['conduit'].prefix)])
        # Add support for OpenMP (macOS clang needs an explicit libomp).
        if spec.satisfies('%clang') or spec.satisfies('%apple-clang'):
            if sys.platform == 'darwin':
                clang = self.compiler.cc
                clang_bin = os.path.dirname(clang)
                clang_root = os.path.dirname(clang_bin)
                args.extend([
                    '-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
                    '-DOpenMP_CXX_LIB_NAMES=libomp',
                    '-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
                        clang_root)])
        if '+opencv' in spec:
            args.extend(['-DOpenCV_DIR:STRING={0}'.format(
                spec['opencv'].prefix)])
        if '+cuda' in spec:
            args.extend([
                '-DCUDA_TOOLKIT_ROOT_DIR={0}'.format(
                    spec['cuda'].prefix)])
            args.extend([
                '-DcuDNN_DIR={0}'.format(
                    spec['cudnn'].prefix)])
            if spec.satisfies('@0.94:0.98.2'):
                args.extend(['-DCUB_DIR={0}'.format(
                    spec['cub'].prefix)])
                if '+nccl' in spec:
                    args.extend([
                        '-DNCCL_DIR={0}'.format(
                            spec['nccl'].prefix)])
        return args
    @when('@0.91:0.93')
    def cmake_args(self):
        """CMake arguments for the legacy 0.91-0.93 (Elemental) releases."""
        spec = self.spec
        args = self.common_config_args
        args.extend([
            '-DWITH_CUDA:BOOL=%s' % ('+cuda' in spec),
            '-DWITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
            '-DELEMENTAL_USE_CUBLAS:BOOL=%s' % (
                '+cublas' in spec['elemental']),
            '-DWITH_TBINF=OFF',
            '-DWITH_VTUNE=OFF',
            '-DElemental_DIR={0}'.format(spec['elemental'].prefix),
            '-DELEMENTAL_MATH_LIBS={0}'.format(
                spec['elemental'].libs),
            '-DSEQ_INIT:BOOL=%s' % ('+seq_init' in spec),
            '-DVERBOSE=0',
            '-DLBANN_HOME=.'])
        # Map the dtype variant onto the legacy numeric DATATYPE option.
        if spec.variants['dtype'].value == 'float':
            args.extend(['-DDATATYPE=4'])
        elif spec.variants['dtype'].value == 'double':
            args.extend(['-DDATATYPE=8'])
        if '+opencv' in spec:
            args.extend(['-DOpenCV_DIR:STRING={0}'.format(
                spec['opencv'].prefix)])
        # NOTE(review): '+cudnn' / '+cub' are not variants declared by this
        # package ('cuda' is); confirm these branches can ever fire.
        if '+cudnn' in spec:
            args.extend(['-DcuDNN_DIR={0}'.format(
                spec['cudnn'].prefix)])
        if '+cub' in spec:
            args.extend(['-DCUB_DIR={0}'.format(
                spec['cub'].prefix)])
        return args
| lgpl-2.1 |
ernfrid/skll | skll/data/featureset.py | 1 | 15821 | # License: BSD 3 clause
"""
Classes related to storing/merging feature sets.
:author: Dan Blanchard (dblanchard@ets.org)
:organization: ETS
"""
from __future__ import absolute_import, print_function, unicode_literals
from copy import deepcopy
import numpy as np
import scipy.sparse as sp
from six import iteritems
from six.moves import zip
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
from skll.data.dict_vectorizer import DictVectorizer as NewDictVectorizer
class FeatureSet(object):
"""
Encapsulation of all of the features, values, and metadata about a given
set of data.
This replaces ``ExamplesTuple`` from older versions.
:param name: The name of this feature set.
:type name: str
:param ids: Example IDs for this set.
:type ids: np.array
:param labels: labels for this set.
:type labels: np.array
:param features: The features for each instance represented as either a
list of dictionaries or an array-like (if `vectorizer` is
also specified).
:type features: list of dict or array-like
:param vectorizer: Vectorizer that created feature matrix.
:type vectorizer: DictVectorizer or FeatureHasher
.. note::
If ids, labels, and/or features are not None, the number of rows in
each array must be equal.
"""
    def __init__(self, name, ids, labels=None, features=None,
                 vectorizer=None):
        """Store the data, vectorizing list-of-dict features, and validate
        that ids/labels/features all have the same number of rows."""
        super(FeatureSet, self).__init__()
        self.name = name
        if isinstance(ids, list):
            ids = np.array(ids)
        self.ids = ids
        if isinstance(labels, list):
            labels = np.array(labels)
        self.labels = labels
        self.features = features
        self.vectorizer = vectorizer
        # Convert list of dicts to numpy array
        if isinstance(self.features, list):
            if self.vectorizer is None:
                self.vectorizer = NewDictVectorizer(sparse=True)
            self.features = self.vectorizer.fit_transform(self.features)
        if self.features is not None:
            num_feats = self.features.shape[0]
            if self.ids is None:
                raise ValueError('A list of IDs is required')
            num_ids = self.ids.shape[0]
            if num_feats != num_ids:
                raise ValueError(('Number of IDs (%s) does not equal '
                                  'number of feature rows (%s)') % (num_ids,
                                                                    num_feats))
            if self.labels is None:
                # np.empty(...).fill(None) yields a float array of NaN -
                # presumably the "no label" marker expected elsewhere in
                # SKLL; confirm before changing.
                self.labels = np.empty(num_feats)
                self.labels.fill(None)
            num_labels = self.labels.shape[0]
            if num_feats != num_labels:
                raise ValueError(('Number of labels (%s) does not equal '
                                  'number of feature rows (%s)') % (num_labels,
                                                                    num_feats))
def __contains__(self, value):
"""
Check if example ID is in set
"""
return value in self.ids
def __eq__(self, other):
"""
Check whether two featuresets are the same.
.. note::
We consider feature values to be equal if any differences are in the
sixth decimal place or higher.
"""
# We need to sort the indices for the underlying
# feature sparse matrix in case we haven't done
# so already.
if not self.features.has_sorted_indices:
self.features.sort_indices()
if not other.features.has_sorted_indices:
other.features.sort_indices()
return (self.ids.shape == other.ids.shape and
self.labels.shape == other.labels.shape and
self.features.shape == other.features.shape and
(self.ids == other.ids).all() and
(self.labels == other.labels).all() and
np.allclose(self.features.data, other.features.data,
rtol=1e-6) and
(self.features.indices == other.features.indices).all() and
(self.features.indptr == other.features.indptr).all() and
self.vectorizer == other.vectorizer)
def __iter__(self):
"""
Iterate through (ID, label, feature_dict) tuples in feature set.
"""
if self.features is not None:
if not isinstance(self.vectorizer, DictVectorizer):
raise ValueError('FeatureSets can only be iterated through if '
'they use a DictVectorizer for their feature '
'vectorizer.')
for id_, label_, feats in zip(self.ids, self.labels,
self.features):
# When calling inverse_transform we have to add [0] to get the
# results for the current instance because it always returns a
# 2D array
yield (id_, label_,
self.vectorizer.inverse_transform(feats)[0])
else:
return
def __len__(self):
return self.features.shape[0]
def __add__(self, other):
"""
Combine two feature sets to create a new one. This is done assuming
they both have the same instances with the same IDs in the same order.
"""
# Check that the sets of IDs are equal
if set(self.ids) != set(other.ids):
raise ValueError('IDs are not in the same order in each '
'feature set')
# Compute the relative ordering of IDs for merging the features
# and labels.
ids_indices = dict((y, x) for x, y in enumerate(other.ids))
relative_order = [ids_indices[self_id] for self_id in self.ids]
# Initialize the new feature set with a name and the IDs.
new_set = FeatureSet('+'.join(sorted([self.name, other.name])),
deepcopy(self.ids))
# Combine feature matrices and vectorizers.
if not isinstance(self.vectorizer, type(other.vectorizer)):
raise ValueError('Cannot combine FeatureSets because they are '
'not both using the same type of feature '
'vectorizer (e.g., DictVectorizer, '
'FeatureHasher)')
uses_feature_hasher = isinstance(self.vectorizer, FeatureHasher)
if uses_feature_hasher:
if (self.vectorizer.n_features !=
other.vectorizer.n_features):
raise ValueError('Cannot combine FeatureSets that uses '
'FeatureHashers with different values of '
'n_features setting.')
else:
# Check for duplicate feature names.
if (set(self.vectorizer.feature_names_) &
set(other.vectorizer.feature_names_)):
raise ValueError('Cannot combine FeatureSets because they '
'have duplicate feature names.')
num_feats = self.features.shape[1]
new_set.features = sp.hstack([self.features,
other.features[relative_order]],
'csr')
new_set.vectorizer = deepcopy(self.vectorizer)
if not uses_feature_hasher:
for feat_name, index in other.vectorizer.vocabulary_.items():
new_set.vectorizer.vocabulary_[feat_name] = (index +
num_feats)
other_names = other.vectorizer.feature_names_
new_set.vectorizer.feature_names_.extend(other_names)
# If either set has labels, check that they don't conflict.
if self.has_labels:
# labels should be the same for each FeatureSet, so store once.
if other.has_labels and \
not np.all(self.labels == other.labels[relative_order]):
raise ValueError('Feature sets have conflicting labels for '
'examples with the same ID.')
new_set.labels = deepcopy(self.labels)
else:
new_set.labels = deepcopy(other.labels[relative_order])
return new_set
def filter(self, ids=None, labels=None, features=None, inverse=False):
"""
Removes or keeps features and/or examples from the Featureset depending
on the passed in parameters.
:param ids: Examples to keep in the FeatureSet. If `None`, no ID
filtering takes place.
:type ids: list of str/float
:param labels: labels that we want to retain examples for. If `None`,
no label filtering takes place.
:type labels: list of str/float
:param features: Features to keep in the FeatureSet. To help with
filtering string-valued features that were converted
to sequences of boolean features when read in, any
features in the FeatureSet that contain a `=` will be
split on the first occurrence and the prefix will be
checked to see if it is in `features`.
If `None`, no feature filtering takes place.
Cannot be used if FeatureSet uses a FeatureHasher for
vectorization.
:type features: list of str
:param inverse: Instead of keeping features and/or examples in lists,
remove them.
:type inverse: bool
"""
# Construct mask that indicates which examples to keep
mask = np.ones(len(self), dtype=bool)
if ids is not None:
mask = np.logical_and(mask, np.in1d(self.ids, ids))
if labels is not None:
mask = np.logical_and(mask, np.in1d(self.labels, labels))
if inverse and (labels is not None or ids is not None):
mask = np.logical_not(mask)
# Remove examples not in mask
self.ids = self.ids[mask]
self.labels = self.labels[mask]
self.features = self.features[mask, :]
# Filter features
if features is not None:
if isinstance(self.vectorizer, FeatureHasher):
raise ValueError('FeatureSets with FeatureHasher vectorizers'
' cannot be filtered by feature.')
columns = np.array(sorted({feat_num for feat_name, feat_num in
iteritems(self.vectorizer.vocabulary_)
if (feat_name in features or
feat_name.split('=', 1)[0] in
features)}))
if inverse:
all_columns = np.arange(self.features.shape[1])
columns = all_columns[np.logical_not(np.in1d(all_columns,
columns))]
self.features = self.features[:, columns]
self.vectorizer.restrict(columns, indices=True)
def filtered_iter(self, ids=None, labels=None, features=None,
inverse=False):
"""
A version of ``__iter__`` that retains only the specified features
and/or examples from the output.
:param ids: Examples in the FeatureSet to keep. If `None`, no ID
filtering takes place.
:type ids: list of str/float
:param labels: labels that we want to retain examples for. If `None`,
no label filtering takes place.
:type labels: list of str/float
:param features: Features in the FeatureSet to keep. To help with
filtering string-valued features that were converted
to sequences of boolean features when read in, any
features in the FeatureSet that contain a `=` will be
split on the first occurrence and the prefix will be
checked to see if it is in `features`.
If `None`, no feature filtering takes place.
Cannot be used if FeatureSet uses a FeatureHasher for
vectorization.
:type features: list of str
:param inverse: Instead of keeping features and/or examples in lists,
remove them.
:type inverse: bool
"""
if self.features is not None and not isinstance(self.vectorizer,
DictVectorizer):
raise ValueError('FeatureSets can only be iterated through if they'
' use a DictVectorizer for their feature '
'vectorizer.')
for id_, label_, feats in zip(self.ids, self.labels, self.features):
# Skip instances with IDs not in filter
if ids is not None and (id_ in ids) == inverse:
continue
# Skip instances with labels not in filter
if labels is not None and (label_ in labels) == inverse:
continue
feat_dict = self.vectorizer.inverse_transform(feats)[0]
if features is not None:
feat_dict = {name: value for name, value in
iteritems(feat_dict) if
(inverse != (name in features or
name.split('=', 1)[0] in features))}
elif not inverse:
feat_dict = {}
yield id_, label_, feat_dict
def __sub__(self, other):
"""
:returns: a copy of ``self`` with all features in ``other`` removed.
"""
new_set = deepcopy(self)
new_set.filter(features=other.vectorizer.feature_names_,
inverse=True)
return new_set
@property
def has_labels(self):
"""
:returns: Whether or not this FeatureSet has any finite labels.
"""
if self.labels is not None:
return not (np.issubdtype(self.labels.dtype, float) and
np.isnan(np.min(self.labels)))
else:
return False
def __str__(self):
"""
:returns: a string representation of FeatureSet
"""
return str(self.__dict__)
def __repr__(self):
"""
:returns: a string representation of FeatureSet
"""
return repr(self.__dict__)
def __getitem__(self, value):
"""
:returns: A specific example by row number, or if given a slice,
a new FeatureSet containing a subset of the data.
"""
# Check if we're slicing
if isinstance(value, slice):
sliced_ids = self.ids[value]
sliced_feats = (self.features[value] if self.features is not None
else None)
sliced_labels = (self.labels[value] if self.labels is not None
else None)
return FeatureSet('{}_{}'.format(self.name, value), sliced_ids,
features=sliced_feats, labels=sliced_labels,
vectorizer=self.vectorizer)
else:
label = self.labels[value] if self.labels is not None else None
feats = self.features[value, :]
features = (self.vectorizer.inverse_transform(feats)[0] if
self.features is not None else {})
return self.ids[value], label, features
| bsd-3-clause |
yueranyuan/vector_edu | wavelet_analysis.py | 1 | 4265 | import numpy as np
import matplotlib.pyplot as plt
from learntools.emotiv.data import segment_raw_data, gen_wavelet_features
from learntools.emotiv.filter import filter_data
from learntools.libs.wavelet import signal_to_wavelet
def show_raw_wave(eeg):
    """Overlay the raw time series of all 14 EEG channels in one figure."""
    n_channels = 14
    for ch in xrange(n_channels):
        plt.plot(eeg[:, ch])
    plt.show()
def show_raw_specgram(eeg, label, block=False):
    """Draw one spectrogram per EEG channel, stacked vertically.

    If ``block`` is True, wait for a mouse click before closing the figure.
    """
    fig, axs = plt.subplots(nrows=14, ncols=1)
    for channel, ax in enumerate(axs):
        ax.specgram(eeg[:, channel], Fs=128)
        ax.set_title("{}[{}]".format(label, channel))
    fig.show()
    if block:
        fig.ginput(timeout=0)
        plt.close('all')
def specgram_slideshow(ds):
    """Step through every trial, showing its spectrogram until clicked."""
    eegs = ds['eeg']
    conds = ds['condition']
    for idx in xrange(len(ds)):
        show_raw_specgram(eegs[idx], "cond=" + str(conds[idx]), block=True)
def plot_conditions(eeg, conditions):
    """Plot mean EEG heatmaps per condition plus split-half sanity checks.

    :param eeg: iterable of per-trial 2D feature maps (all the same shape).
    :param conditions: array of 0/1 condition labels, one per trial.
    """
    # Imported locally so this function also works when the module is
    # imported; previously it relied on the ``from itertools import compress``
    # that only runs under the __main__ guard at the bottom of this file.
    from itertools import compress
    eeg1_full = np.asarray(list(compress(eeg, conditions == 0)))
    eeg2_full = np.asarray(list(compress(eeg, conditions == 1)))
    # draw select trials
    for i in xrange(10):
        plt.subplot(1, 10, i + 1)
        plt.pcolor(eeg1_full[i], cmap=plt.cm.Blues)
    plt.show()
    eeg1 = np.mean(eeg1_full, axis=0)
    eeg2 = np.mean(eeg2_full, axis=0)
    def _plot_heatmap(data):
        # Shared colormap so the three/four panels are visually comparable.
        return plt.pcolor(data, cmap=plt.cm.Blues)
    # draw between class difference
    plt.subplot(1, 3, 1)
    _plot_heatmap(eeg1)
    plt.subplot(1, 3, 2)
    _plot_heatmap(eeg2)
    plt.subplot(1, 3, 3)
    _plot_heatmap(eeg1 - eeg2)
    plt.show()
    # draw within class difference (first half of trials vs. second half)
    # NOTE(review): len(eeg1) is the row count of the *mean* map, not the
    # number of trials; len(eeg1_full) // 2 was probably intended -- confirm.
    # (Also: '/' is integer division only under Python 2, which this module
    # targets -- it uses xrange elsewhere.)
    plt.subplot(1, 4, 1)
    _plot_heatmap(np.mean(eeg1_full[:(len(eeg1) / 2)], axis=0))
    plt.subplot(1, 4, 2)
    _plot_heatmap(np.mean(eeg1_full[(len(eeg1) / 2):], axis=0))
    plt.subplot(1, 4, 3)
    _plot_heatmap(np.mean(eeg2_full[:(len(eeg2) / 2)], axis=0))
    plt.subplot(1, 4, 4)
    _plot_heatmap(np.mean(eeg2_full[(len(eeg2) / 2):], axis=0))
    plt.show()
def _shape(ys):
""" Get the shape of a non-numpy python array. This assumes the first index of every dimension is
indicative of the shape of the whole matrix.
Examples:
>>> _shape([1, 2, 3])
[3]
>>> _shape([[1, 2, 3], [4, 5]])
[2, 3]
"""
if hasattr(ys, '__len__'):
return [len(ys)] + _shape(ys[0])
else:
return []
def plot_waves(ys, ylim=None):
    """Plot a (possibly nested) collection of 1-d signals, one subplot per
    leaf signal, laid out on a dim1 x dim2 grid derived from the nesting."""
    shape = _shape(ys)
    # Map the nesting depth of ``ys`` onto a 2D subplot grid: the product of
    # all but the last two dims becomes the row count.
    if len(shape) > 3:
        from operator import __mul__
        # NOTE(review): ``reduce`` is the Python 2 builtin here (this module
        # also uses the Python 2 print statement below).
        dim1 = reduce(__mul__, shape[:-2])
        dim2 = shape[-2]
    elif len(shape) == 3:
        dim1, dim2 = shape[:2]
    elif len(shape) == 2:
        dim1, dim2 = shape[0], 1
    elif len(shape) == 1:
        dim1 = dim2 = 1
    else:
        raise Exception("malformed ys")
    def _plot_wave(y, i):
        # Leaf (flat signal): draw it in subplot ``i`` (1-based) and advance
        # the counter; otherwise recurse, threading the counter through.
        if len(_shape(y)) == 1:
            print i
            plt.subplot(dim1, dim2, i)
            if ylim is not None:
                plt.ylim(ylim)
            plt.plot(y)
            return i + 1
        else:
            for _y in y:
                i = _plot_wave(_y, i)
            return i
    _plot_wave(ys, 1)
    plt.show()
def analyze_waves(ds, n=20, ylim=(-80, 80)):
    """Visually inspect the wavelet decomposition of channel 0 for the first
    ``n`` trials, plotting each raw segment and its (wavelet, downsampled)
    pairs."""
    for i in xrange(n):
        eeg_segment = ds['eeg'][i]
        wavelet = signal_to_wavelet(eeg_segment[:, 0], min_length=0, max_length=None,
                                    depth=5, family='db6')
        plot_waves(eeg_segment.T)
        plot_waves([(w, _downsample(w, 6)) for w in wavelet], ylim=ylim)
        # NOTE(review): exit() inside the loop stops the whole process after
        # the first trial, making ``n`` effectively unused -- looks like a
        # debugging leftover; confirm before removing.
        exit()
def analyze_features(ds, max_length=4):
    """Compare mean wavelet-feature heatmaps between the two conditions."""
    ds = gen_wavelet_features(ds, duration=10, sample_rate=128, depth=5, min_length=3, max_length=max_length,
                              family='db6')
    # NOTE(review): return value of filter_data is discarded -- presumably it
    # filters ``ds`` in place; confirm against its definition.
    filter_data(ds)
    eeg = ds['eeg'][:]
    # Reshape assumes 14 channels x 6 wavelet bands x max_length coefficients
    # per trial (6 bands matching depth=5 above -- TODO confirm).
    eeg = eeg.reshape((eeg.shape[0], 14, 6, max_length))
    # Average out the per-band coefficient axis before plotting heatmaps.
    eeg_no_time = np.mean(eeg, axis=3)
    plot_conditions(eeg=eeg_no_time, conditions=ds['condition'])
if __name__ == "__main__":
    # NOTE(review): these imports are also consumed by functions defined
    # above (plot_conditions uses ``compress``, analyze_waves uses
    # ``_downsample``), so those functions fail when this module is imported
    # rather than run as a script.
    from itertools import compress
    from learntools.libs.wavelet import _downsample
    dataset_name = 'data/emotiv_all.gz'
    ds = segment_raw_data(dataset_name=dataset_name, conds=['EyesOpen', 'EyesClosed'])
    # analyze_waves(ds, n=2)
analyze_features(ds, max_length=4) | mit |
nelson-liu/scikit-learn | sklearn/feature_extraction/image.py | 19 | 17614 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x : integer
The size of the grid in the x direction.
n_y : integer
The size of the grid in the y direction.
n_z : integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
              return_as=sparse.coo_matrix, dtype=None):
    """Auxiliary function for img_to_graph and grid_to_graph
    """
    edges = _make_edges_3d(n_x, n_y, n_z)
    # Default dtype: int for pure connectivity graphs, the image's dtype when
    # edges are weighted by intensity gradients.
    if dtype is None:
        if img is None:
            dtype = np.int
        else:
            dtype = img.dtype
    if img is not None:
        # Gradient-weighted graph: edge weights are absolute intensity
        # differences; the diagonal holds the (masked) pixel values.
        img = np.atleast_3d(img)
        weights = _compute_gradient_3d(edges, img)
        if mask is not None:
            edges, weights = _mask_edges_weights(mask, edges, weights)
            diag = img.squeeze()[mask]
        else:
            diag = img.ravel()
        n_voxels = diag.size
    else:
        # Pure connectivity graph: unit weights, unit diagonal.
        if mask is not None:
            # ``astype`` comes from sklearn.utils.fixes (imported above).
            mask = astype(mask, dtype=np.bool, copy=False)
            mask = np.asarray(mask, dtype=np.bool)
            edges = _mask_edges_weights(mask, edges)
            n_voxels = np.sum(mask)
        else:
            n_voxels = n_x * n_y * n_z
        weights = np.ones(edges.shape[1], dtype=dtype)
        diag = np.ones(n_voxels, dtype=dtype)
    diag_idx = np.arange(n_voxels)
    # Symmetrize: each edge appears in both (i, j) and (j, i), plus the
    # diagonal entries.
    i_idx = np.hstack((edges[0], edges[1]))
    j_idx = np.hstack((edges[1], edges[0]))
    graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
                               (np.hstack((i_idx, diag_idx)),
                                np.hstack((j_idx, diag_idx)))),
                              (n_voxels, n_voxels),
                              dtype=dtype)
    if return_as is np.ndarray:
        return graph.toarray()
    return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
    """Graph of the pixel-to-pixel gradient connections
    Edges are weighted with the gradient values.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    img : ndarray, 2D or 3D
        2D or 3D image
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : None or dtype, optional
        The data of the returned sparse matrix. By default it is the
        dtype of img
    Returns
    -------
    graph : ndarray or sparse matrix instance (of type ``return_as``)
        The adjacency matrix of the pixel graph.
    Notes
    -----
    For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
    handled by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.
    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    # Promote 2D images to a 3D view with a singleton z axis so the generic
    # 3D helper can be used.
    img = np.atleast_3d(img)
    n_x, n_y, n_z = img.shape
    return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
                  dtype=np.int):
    """Graph of the pixel-to-pixel connections
    Edges exist if 2 voxels are connected.
    Parameters
    ----------
    n_x : int
        Dimension in x axis
    n_y : int
        Dimension in y axis
    n_z : int, optional, default 1
        Dimension in z axis
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : dtype, optional, default int
        The data of the returned sparse matrix. By default it is int
    Returns
    -------
    graph : ndarray or sparse matrix instance (of type ``return_as``)
        The adjacency matrix of the grid graph (unit edge weights).
    Notes
    -----
    For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
    handled by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.
    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    # No image is passed, so _to_graph builds a pure connectivity graph.
    return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
                     dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image with
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
    """Extracts patches of any n-dimensional array in place using strides.
    Given an n-dimensional array it will return a 2n-dimensional array with
    the first n dimensions indexing patch position and the last n indexing
    the patch content. This operation is immediate (O(1)). A reshape
    performed on the first n dimensions will cause numpy to copy data, leading
    to a list of extracted patches.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    arr : ndarray
        n-dimensional array of which patches are to be extracted
    patch_shape : integer or tuple of length arr.ndim
        Indicates the shape of the patches to be extracted. If an
        integer is given, the shape will be a hypercube of
        sidelength given by its value.
    extraction_step : integer or tuple of length arr.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.
    Returns
    -------
    patches : strided ndarray
        2n-dimensional array indexing patches on first n dimensions and
        containing patches on the last n dimensions. These dimensions
        are fake, but this way no data is copied. A simple reshape invokes
        a copying operation to obtain a list of patches:
        result.reshape([-1] + list(patch_shape))
    """
    arr_ndim = arr.ndim
    # Scalars are broadcast to a per-dimension tuple.
    if isinstance(patch_shape, numbers.Number):
        patch_shape = tuple([patch_shape] * arr_ndim)
    if isinstance(extraction_step, numbers.Number):
        extraction_step = tuple([extraction_step] * arr_ndim)
    patch_strides = arr.strides
    # Index with a *tuple* of slices: indexing an ndarray with a list of
    # slices was deprecated in NumPy 1.15 and removed in later releases.
    slices = tuple(slice(None, None, st) for st in extraction_step)
    indexing_strides = arr[slices].strides
    patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
                           np.array(extraction_step)) + 1
    # Result layout: patch-position dims first, patch-content dims last; the
    # strided view shares memory with ``arr`` (no copy).
    shape = tuple(list(patch_indices_shape) + list(patch_shape))
    strides = tuple(list(indexing_strides) + list(patch_strides))
    patches = as_strided(arr, shape=shape, strides=strides)
    return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
    """Reshape a 2D image into a collection of patches
    The resulting patches are allocated in a dedicated array.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    image : array, shape = (image_height, image_width) or
        (image_height, image_width, n_channels)
        The original image data. For color images, the last dimension specifies
        the channel: a RGB image would have `n_channels=3`.
    patch_size : tuple of ints (patch_height, patch_width)
        the dimensions of one patch
    max_patches : integer or float, optional default is None
        The maximum number of patches to extract. If max_patches is a float
        between 0 and 1, it is taken to be a proportion of the total number
        of patches.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling to use if
        `max_patches` is not None.
    Returns
    -------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The collection of patches extracted from the image, where `n_patches`
        is either `max_patches` or the total number of patches that can be
        extracted.
    Examples
    --------
    >>> from sklearn.feature_extraction import image
    >>> one_image = np.arange(16).reshape((4, 4))
    >>> one_image
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> patches = image.extract_patches_2d(one_image, (2, 2))
    >>> print(patches.shape)
    (9, 2, 2)
    >>> patches[0]
    array([[0, 1],
           [4, 5]])
    >>> patches[1]
    array([[1, 2],
           [5, 6]])
    >>> patches[8]
    array([[10, 11],
           [14, 15]])
    """
    i_h, i_w = image.shape[:2]
    p_h, p_w = patch_size
    if p_h > i_h:
        raise ValueError("Height of the patch should be less than the height"
                         " of the image.")
    if p_w > i_w:
        raise ValueError("Width of the patch should be less than the width"
                         " of the image.")
    image = check_array(image, allow_nd=True)
    # Work on a 3D view: grayscale images get a trailing singleton channel
    # axis so the strided extraction below is uniform.
    image = image.reshape((i_h, i_w, -1))
    n_colors = image.shape[-1]
    extracted_patches = extract_patches(image,
                                        patch_shape=(p_h, p_w, n_colors),
                                        extraction_step=1)
    n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
    if max_patches:
        # Sample patch top-left corners uniformly at random (with
        # replacement).
        rng = check_random_state(random_state)
        i_s = rng.randint(i_h - p_h + 1, size=n_patches)
        j_s = rng.randint(i_w - p_w + 1, size=n_patches)
        patches = extracted_patches[i_s, j_s, 0]
    else:
        patches = extracted_patches
    # Reshape copies the strided view into a dense (n_patches, ...) array.
    patches = patches.reshape(-1, p_h, p_w, n_colors)
    # remove the color dimension if useless
    if patches.shape[-1] == 1:
        return patches.reshape((n_patches, p_h, p_w))
    else:
        return patches
def reconstruct_from_patches_2d(patches, image_size):
    """Reconstruct the image from all of its patches.
    Patches are assumed to overlap and the image is constructed by filling in
    the patches from left to right, top to bottom, averaging the overlapping
    regions.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour information,
        channels are indexed along the last dimension: RGB patches would
        have `n_channels=3`.
    image_size : tuple of ints (image_height, image_width) or
        (image_height, image_width, n_channels)
        the size of the image that will be reconstructed
    Returns
    -------
    image : array, shape = image_size
        the reconstructed image
    """
    i_h, i_w = image_size[:2]
    p_h, p_w = patches.shape[1:3]
    img = np.zeros(image_size)
    # Patches are assumed ordered row-major over all valid top-left corners.
    n_h = i_h - p_h + 1
    n_w = i_w - p_w + 1
    corners = product(range(n_h), range(n_w))
    for patch, (top, left) in zip(patches, corners):
        img[top:top + p_h, left:left + p_w] += patch
    # Pixel (r, c) was covered by min(r+1, p_h, i_h-r) * min(c+1, p_w, i_w-c)
    # overlapping patches; divide to turn the sums into averages.
    for r in range(i_h):
        for c in range(i_w):
            img[r, c] /= float(min(r + 1, p_h, i_h - r) *
                               min(c + 1, p_w, i_w - c))
    return img
class PatchExtractor(BaseEstimator):
    """Extracts patches from a collection of images
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    patch_size : tuple of ints (patch_height, patch_width)
        the dimensions of one patch
    max_patches : integer or float, optional default is None
        The maximum number of patches per image to extract. If max_patches is a
        float in (0, 1), it is taken to mean a proportion of the total number
        of patches.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.
    """
    def __init__(self, patch_size=None, max_patches=None, random_state=None):
        self.patch_size = patch_size
        self.max_patches = max_patches
        self.random_state = random_state
    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged
        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        return self
    def transform(self, X):
        """Transforms the image samples in X into a matrix of patch data.
        Parameters
        ----------
        X : array, shape = (n_samples, image_height, image_width) or
            (n_samples, image_height, image_width, n_channels)
            Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: a RGB image would have
            `n_channels=3`.
        Returns
        -------
        patches : array, shape = (n_patches, patch_height, patch_width) or
             (n_patches, patch_height, patch_width, n_channels)
             The collection of patches extracted from the images, where
             `n_patches` is either `n_samples * max_patches` or the total
             number of patches that can be extracted.
        """
        # NOTE(review): this overwrites self.random_state with a RandomState
        # instance, mutating the estimator inside transform(); later
        # scikit-learn versions avoid this pattern -- confirm before relying
        # on repeated-call determinism.
        self.random_state = check_random_state(self.random_state)
        n_images, i_h, i_w = X.shape[:3]
        # Uniform 4D view: grayscale inputs get a singleton channel axis.
        X = np.reshape(X, (n_images, i_h, i_w, -1))
        n_channels = X.shape[-1]
        if self.patch_size is None:
            # Default patch size: one tenth of the image in each dimension.
            patch_size = i_h // 10, i_w // 10
        else:
            patch_size = self.patch_size
        # compute the dimensions of the patches array
        p_h, p_w = patch_size
        n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
        patches_shape = (n_images * n_patches,) + patch_size
        if n_channels > 1:
            patches_shape += (n_channels,)
        # extract the patches
        patches = np.empty(patches_shape)
        for ii, image in enumerate(X):
            patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
                image, patch_size, self.max_patches, self.random_state)
        return patches
| bsd-3-clause |
mpld3/mpld3_rewrite | test_plots/test_plot_w_html_tooltips.py | 1 | 1286 | """Plot to test HTML tooltip plugin
As a data explorer, I want to add rich information to each point in a
scatter plot, as details-on-demand"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np, pandas as pd
from mpld3_rewrite import plugins
css = """
table
{
border-collapse: collapse;
}
th
{
color: #ffffff;
background-color: #000000;
}
td
{
background-color: #cccccc;
}
table, th, td
{
font-family:Arial, Helvetica, sans-serif;
border: 1px solid black;
text-align: right;
}
"""
def main():
    """Build a random scatter plot whose points carry HTML-table tooltips.

    Returns the matplotlib figure with the mpld3 tooltip plugin attached.
    """
    fig, ax = plt.subplots()
    N = 50
    df = pd.DataFrame(index=range(N))
    df['x'] = np.random.randn(N)
    df['y'] = np.random.randn(N)
    df['z'] = np.random.randn(N)
    labels = []
    for i in range(N):
        # ``.ix`` was deprecated and later removed from pandas; ``.iloc`` is
        # the positional equivalent for this integer RangeIndex.
        label = df.iloc[[i], :].T
        label.columns = ['Row {0}'.format(i)]
        labels.append(str(label.to_html()))  # .to_html() is unicode, so make leading 'u' go away with str()
    points = ax.plot(df.x, df.y, 'o', color='k', mec='w', ms=15, mew=1, alpha=.9)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    # One HTML table per point, shown on hover via the module-level ``css``.
    tooltip = plugins.PointHTMLTooltip(
        points[0], labels, voffset=10, hoffset=10, css=css)
    plugins.connect(fig, tooltip)
    return fig
if __name__ == '__main__':
    # Render the test figure interactively when run as a script.
    fig = main()
    plt.show()
| bsd-3-clause |
shusenl/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; so does the average rooms
per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
                                                    cal_housing.target,
                                                    test_size=0.2,
                                                    random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
                                learning_rate=0.1, loss='huber',
                                random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
# NOTE(review): bare ``print`` below is a Python 2 leftover; on Python 3 it
# is a no-op expression rather than a blank line.
print
# Four one-way PDPs (features 0, 5, 1, 2) and one two-way PDP (5, 1).
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
                                   n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
             'for the California housing dataset')
plt.subplots_adjust(top=0.9)  # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
# NOTE(review): same Python 2 bare ``print`` as above.
print
fig = plt.figure()
# 3D surface of the two-way partial dependence on (HouseAge, AveOccup).
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
                                           X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
             'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
zangsir/sms-tools | lectures/04-STFT/plots-code/sine-spectrum.py | 24 | 1563 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, ifft

N = 256
M = 63
f0 = 1000
fs = 10000
A0 = .8
# Use floor division so the half-sizes are ints: under Python 3 the old
# ``N/2`` yields a float, and float values are rejected as slice/array
# indices in the buffer assignments below.
hN = N//2
hM = (M+1)//2
fftbuffer = np.zeros(N)
X1 = np.zeros(N, dtype='complex')
X2 = np.zeros(N, dtype='complex')

# Real sinusoid centered around n = 0 (M samples)
x = A0 * np.cos(2*np.pi*f0/fs*np.arange(-hM+1,hM))

plt.figure(1, figsize=(9.5, 7))
w = np.hanning(M)
plt.subplot(2,3,1)
plt.title('w (hanning window)')
plt.plot(np.arange(-hM+1, hM), w, 'b', lw=1.5)
plt.axis([-hM+1, hM, 0, 1])

# Zero-phase windowing: wrap the window around the FFT buffer edges
fftbuffer[:hM] = w[hM-1:]
fftbuffer[N-hM+1:] = w[:hM-1]
X = fft(fftbuffer)
# fftshift by hand so the zero frequency sits in the middle of the plot
X1[:hN] = X[hN:]
X1[N-hN:] = X[:hN]
mX = 20*np.log10(abs(X1))
plt.subplot(2,3,2)
plt.title('mW')
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN,hN,-40,max(mX)])

pX = np.angle(X1)
plt.subplot(2,3,3)
plt.title('pW')
plt.plot(np.arange(-hN, hN), np.unwrap(pX), 'c', lw=1.5)
plt.axis([-hN,hN,min(np.unwrap(pX)),max(np.unwrap(pX))])

plt.subplot(2,3,4)
plt.title('xw (windowed sinewave)')
xw = x*w
plt.plot(np.arange(-hM+1, hM), xw, 'b', lw=1.5)
plt.axis([-hM+1, hM, -1, 1])

fftbuffer = np.zeros(N)
fftbuffer[0:hM] = xw[hM-1:]
fftbuffer[N-hM+1:] = xw[:hM-1]
X = fft(fftbuffer)
X2[:hN] = X[hN:]
X2[N-hN:] = X[:hN]
mX2 = 20*np.log10(abs(X2))
plt.subplot(2,3,5)
plt.title('mXW')
plt.plot(np.arange(-hN, hN), mX2, 'r', lw=1.5)
plt.axis([-hN,hN,-40,max(mX)])

pX = np.angle(X2)
plt.subplot(2,3,6)
plt.title('pXW')
plt.plot(np.arange(-hN, hN), np.unwrap(pX), 'c', lw=1.5)
plt.axis([-hN,hN,min(np.unwrap(pX)),max(np.unwrap(pX))])

plt.tight_layout()
plt.savefig('sine-spectrum.png')
plt.show()
| agpl-3.0 |
herilalaina/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample: 6 points, 2 features, linearly separable binary labels
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the boston dataset (regression fixture)
boston = datasets.load_boston()

# also load the iris dataset (multi-class fixture)
iris = datasets.load_iris()
def test_partial_dependence_classifier():
    """Partial dependence values for a fitted binary classifier."""
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(X, y)

    pdp, axes = partial_dependence(est, [0], X=X, grid_resolution=5)

    # Only 4 grid points are produced (not 5): X[:, 0] has 4 unique values.
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4

    # Supplying an explicit grid must give the same values and no axes.
    custom_grid = np.unique(np.asarray(X)[:, 0])
    pdp_custom, axes = partial_dependence(est, [0], grid=custom_grid)

    assert axes is None
    assert_array_equal(pdp, pdp_custom)
def test_partial_dependence_multiclass():
    """A multi-class classifier yields one dependence curve per class."""
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(iris.data, iris.target)

    resolution = 25
    pdp, axes = partial_dependence(est, [0], X=iris.data,
                                   grid_resolution=resolution)

    assert pdp.shape == (est.n_classes_, resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == resolution
def test_partial_dependence_regressor():
    """A regressor yields a single partial-dependence curve."""
    est = GradientBoostingRegressor(n_estimators=10, random_state=1)
    est.fit(boston.data, boston.target)

    resolution = 25
    pdp, axes = partial_dependence(est, [0], X=boston.data,
                                   grid_resolution=resolution)

    assert pdp.shape == (1, resolution)
    assert axes[0].shape[0] == resolution
def test_partial_dependecy_input():
    """Invalid arguments to partial_dependence raise ValueError."""
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(X, y)

    # exactly one of `grid` and `X` has to be specified
    assert_raises(ValueError, partial_dependence,
                  est, [0], grid=None, X=None)
    assert_raises(ValueError, partial_dependence,
                  est, [0], grid=[0, 1], X=X)

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
                  {}, [0], X=X)

    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)

    # feature indices must lie within [0, n_features)
    assert_raises(ValueError, partial_dependence, est, [-1], X=X)
    assert_raises(ValueError, partial_dependence, est, [100], X=X)

    # grid may be at most 2-dimensional
    bad_grid = np.random.rand(10, 2, 1)
    assert_raises(ValueError, partial_dependence, est, [0], grid=bad_grid)
@if_matplotlib
def test_plot_partial_dependence():
    """plot_partial_dependence handles int, str and tuple feature specs."""
    est = GradientBoostingRegressor(n_estimators=10, random_state=1)
    est.fit(boston.data, boston.target)
    resolution = 25

    def check(features, names):
        # One axis per requested feature (or feature pair), each populated.
        fig, axs = plot_partial_dependence(est, boston.data, features,
                                           grid_resolution=resolution,
                                           feature_names=names)
        assert len(axs) == 3
        assert all(ax.has_data for ax in axs)

    # integer feature indices with an array of feature names
    check([0, 1, (0, 1)], boston.feature_names)
    # string features resolved through the feature-name array
    check(['CRIM', 'ZN', ('CRIM', 'ZN')], boston.feature_names)
    # feature names supplied as a plain list
    check(['CRIM', 'ZN', ('CRIM', 'ZN')], boston.feature_names.tolist())
@if_matplotlib
def test_plot_partial_dependence_input():
    """Input validation performed by plot_partial_dependence."""
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # estimator has to be fitted before it can be plotted
    assert_raises(ValueError, plot_partial_dependence,
                  est, X, [0])
    est.fit(X, y)

    # X must provide at least one feature column
    assert_raises(ValueError, plot_partial_dependence,
                  est, np.array(X)[:, :0], [0])

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence,
                  {}, X, [0])

    # feature indices must be larger than -1 and smaller than n_features
    assert_raises(ValueError, plot_partial_dependence,
                  est, X, [-1])
    assert_raises(ValueError, plot_partial_dependence,
                  est, X, [100])

    # string features require feature_names to be given
    assert_raises(ValueError, plot_partial_dependence,
                  est, X, ['foobar'])

    # feature entries must be ints, strings or tuples thereof
    assert_raises(ValueError, plot_partial_dependence,
                  est, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
    """Multi-class plotting requires a valid ``label`` argument."""
    resolution = 25

    # numeric class labels
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(iris.data, iris.target)
    fig, axs = plot_partial_dependence(est, iris.data, [0, 1],
                                       label=0,
                                       grid_resolution=resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # symbolic (string) class labels
    symbol_target = iris.target_names[iris.target]
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(iris.data, symbol_target)
    fig, axs = plot_partial_dependence(est, iris.data, [0, 1],
                                       label='setosa',
                                       grid_resolution=resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # a label that is not in gbrt.classes_ is rejected
    assert_raises(ValueError, plot_partial_dependence,
                  est, iris.data, [0, 1], label='foobar',
                  grid_resolution=resolution)

    # omitting the label entirely is rejected too
    assert_raises(ValueError, plot_partial_dependence,
                  est, iris.data, [0, 1],
                  grid_resolution=resolution)
| bsd-3-clause |
micksi/datamining_exam | omer/content_base_filtering.py | 1 | 2632 | import sys
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.neighbors import NearestNeighbors
# Widen pandas' column display so long titles/genre strings print untruncated.
pd.set_option('display.max_colwidth', 300)

# Input CSVs -- assumed to live in the working directory (TODO confirm).
file_name = "anime.csv"
file_name_rating = 'rating.csv'
def normalize(df_user_profiles):
    """Min-max scale each profile row to [0, 1], keeping user_id intact.

    The feature matrix is transposed before scaling because MinMaxScaler
    works column-wise; transposing makes it normalize per user (row)
    rather than per genre (column).
    """
    features = df_user_profiles.iloc[:, 1:].values
    scaled = preprocessing.MinMaxScaler().fit_transform(features.T).T
    scaled_df = pd.DataFrame(scaled, columns=df_user_profiles.columns[1:])
    return pd.concat([df_user_profiles['user_id'], scaled_df], axis=1)
def get_user_profile(user_id, df_rating, df_a_fatures):
    """Build a one-row genre-count profile for a single user.

    :param user_id: id of the user whose ratings are aggregated
    :param df_rating: DataFrame with columns user_id, anime_id, rating
    :param df_a_fatures: DataFrame with anime_id plus 0/1 genre columns
    :return: one-row DataFrame: per genre column, the number of the
        user's animes carrying that genre flag; 'user_id' holds user_id.
    """
    df_user = df_rating.loc[df_rating['user_id'] == user_id]
    df_merged = pd.merge(df_user, df_a_fatures, how='left',
                         on='anime_id').drop(['anime_id', 'rating'], axis=1)
    # Per column, count how often each value occurs, then keep only the
    # row of counts for the value 1 (i.e. how many of the user's animes
    # carry each genre flag).  The original masked with
    # ``df_merged.index == 1``, whose length is the number of merged rows,
    # not the number of distinct values -- a wrong-length boolean mask;
    # select by the counts frame's own index instead.
    counts = df_merged.apply(pd.Series.value_counts)
    df_user_sum = counts.loc[counts.index == 1].fillna(0)
    # Column assignment instead of attribute assignment: the attribute
    # form only works when the column already exists and is fragile.
    df_user_sum['user_id'] = user_id
    return df_user_sum
def print_animes_by_indices(indices):
    """Print the first three columns of df_animes for each index group.

    ``indices`` is presumably a list of row-position lists as returned by
    ``NearestNeighbors.kneighbors(...)[1].tolist()`` -- TODO confirm with
    the caller below.
    """
    for i in indices:
        # Parenthesized single-argument print is valid under both
        # Python 2 and Python 3; the bare print statement was 2-only.
        print(df_animes.iloc[i, :3])
# NOTE(review): this script uses Python 2 print statements; it will not
# run unmodified under Python 3.
df_rating = pd.read_csv(file_name_rating)
df_animes = pd.read_csv(file_name)

# One-hot encode the comma-separated genre strings into indicator columns.
df_anime_genres = pd.get_dummies(df_animes['genre'].str.get_dummies(sep=", ")) # creates genre vectors
df_anime_vector = pd.concat([df_animes['anime_id'], df_anime_genres], axis=1) # anime_id + genre vector

# first 10 users
users = list(df_rating['user_id'].unique())[:10]

# Create user profiles (one genre-count row per user):
df_user_profiles = pd.DataFrame()
for u in users:
    u_prof = get_user_profile(u, df_rating, df_anime_vector)
    df_user_profiles = df_user_profiles.append(u_prof, ignore_index = True)

# ??? form user profile from 80% of the watched animes
# Normalize user profile
df_user_prof_norm = normalize(df_user_profiles)

# User we generate recommendations for
user_id = 1
user_animes = df_rating[df_rating['user_id'] == user_id]
user_animes = user_animes['anime_id'].tolist() # animes watched by the user

# Remove the animes already watched by the user
df_anime_vector_foruser = df_anime_vector[~df_anime_vector['anime_id'].isin(user_animes)]

# Fit nearest neighbours on the remaining anime genre vectors
nbrs = NearestNeighbors(n_neighbors=10, algorithm='ball_tree').fit(df_anime_vector_foruser.iloc[:,1:])
user_prof = df_user_prof_norm[df_user_prof_norm['user_id'] == user_id]
user_prof = user_prof.drop('user_id', axis=1)

# Get the animes closest to the user's normalized profile
distances, indices = nbrs.kneighbors(user_prof)

print "Our recommendations: "
print_animes_by_indices(indices.tolist())
print "User profile (non-normalized):"
print df_user_profiles[df_user_profiles['user_id'] == user_id].T
| gpl-2.0 |
xiaoxiamii/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
# scipy.special.expit appeared in SciPy 0.10, but before 0.14 it was
# numerically unstable for large inputs (expit(1000) -> nan); in that
# case fall back to a stable tanh-based implementation.
try:
    from scipy.special import expit  # SciPy >= 0.10
    with np.errstate(invalid='ignore', over='ignore'):
        if np.isnan(expit(1000)):  # SciPy < 0.14
            raise ImportError("no stable expit in scipy.special")
except ImportError:
    def expit(x, out=None):
        """Logistic sigmoid function, ``1 / (1 + exp(-x))``.

        See sklearn.utils.extmath.log_logistic for the log of this function.
        """
        if out is None:
            out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
        out[:] = x

        # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
        # This way of computing the logistic is both fast and stable.
        out *= .5
        np.tanh(out, out)
        out += 1
        out *= .5

        return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument;
# with it we can copy while preserving X's memory layout ('K' order).
if 'order' in inspect.getargspec(np.copy)[0]:
    def safe_copy(X):
        # Copy, but keep the order
        return np.copy(X, order='K')
else:
    # Before an 'order' argument was introduced, numpy wouldn't muck with
    # the ordering
    safe_copy = np.copy
# Probe whether np.divide honors its `dtype`/`casting` arguments; some
# numpy versions are broken (https://github.com/numpy/numpy/issues/3484)
# and get a compatible reimplementation instead.
try:
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=np.float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide

except TypeError:
    # Compat for old versions of np.divide that do not provide support for
    # the dtype args
    def divide(x1, x2, out=None, dtype=None):
        out_orig = out
        if out is None:
            out = np.asarray(x1, dtype=dtype)
            if out is x1:
                # asarray returned x1 itself; copy so we don't mutate input
                out = x1.copy()
        else:
            if out is not x1:
                out[:] = x1
        if dtype is not None and out.dtype != dtype:
            out = out.astype(dtype)
        out /= x2
        if out_orig is None and np.isscalar(x1):
            # mirror np.divide: scalar in, scalar out
            out = np.asscalar(out)
        return out
# ndarray.astype grew its `copy` keyword later; emulate it on older
# numpy so callers can avoid needless copies.
try:
    np.array(5).astype(float, copy=False)
except TypeError:
    # Compat where astype accepted no copy argument
    def astype(array, dtype, copy=True):
        if not copy and array.dtype == dtype:
            return array
        return array.astype(dtype)
else:
    astype = np.ndarray.astype
# scipy's sparse matrices accept an `axis` argument to min/max only from
# scipy 0.14 on; probe for it and otherwise use the implementation
# backported from the scipy 0.14 codebase below.
try:
    with warnings.catch_warnings(record=True):
        # Don't raise the numpy deprecation warnings that appear in
        # 1.9, but avoid Python bug due to simplefilter('ignore')
        warnings.simplefilter('always')
        sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
    # the following code is taken from the scipy 0.14 codebase

    def _minor_reduce(X, ufunc):
        # Reduce along the minor axis of a compressed sparse matrix,
        # returning the non-empty major indices and their reduced values.
        major_index = np.flatnonzero(np.diff(X.indptr))
        if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 don't handle empty arrays in reduceat
            value = np.zeros_like(X.data)
        else:
            value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        # Pick the compressed layout whose major axis matches `axis`.
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        # Rows/columns that are not fully populated must also compete
        # against the implicit zero entries.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)

        from scipy.sparse import coo_matrix
        if axis == 0:
            res = coo_matrix((value, (np.zeros(len(value)), major_index)),
                             dtype=X.dtype, shape=(1, M))
        else:
            res = coo_matrix((value, (major_index, np.zeros(len(value)))),
                             dtype=X.dtype, shape=(M, 1))
        return res.A.ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        if axis is None:
            # Global reduction over every entry, implicit zeros included.
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            if X.nnz != np.product(X.shape):
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def sparse_min_max(X, axis):
        return (_sparse_min_or_max(X, axis, np.minimum),
                _sparse_min_or_max(X, axis, np.maximum))

else:
    def sparse_min_max(X, axis):
        return (X.min(axis=axis).toarray().ravel(),
                X.max(axis=axis).toarray().ravel())
try:
    from numpy import argpartition
except ImportError:
    # numpy.argpartition was introduced in v 1.8.0
    def argpartition(a, kth, axis=-1, kind='introselect', order=None):
        # A full argsort is a valid (if slower) substitute: any totally
        # sorted order also satisfies the partition contract.
        return np.argsort(a, axis=axis, order=order)


try:
    from itertools import combinations_with_replacement
except ImportError:
    # Backport of itertools.combinations_with_replacement for Python 2.6,
    # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
    # Python Software Foundation (https://docs.python.org/3/license.html)
    def combinations_with_replacement(iterable, r):
        # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be incremented.
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
try:
    from numpy import isclose
except ImportError:
    def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
        """
        Returns a boolean array where two arrays are element-wise equal within
        a tolerance.

        This function was added to numpy v1.7.0, and the version you are
        running has been backported from numpy v1.8.1. See its documentation
        for more details.
        """
        def within_tol(x, y, atol, rtol):
            with np.errstate(invalid='ignore'):
                result = np.less_equal(abs(x - y), atol + rtol * abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result

        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if all(xfin) and all(yfin):
            return within_tol(x, y, atol, rtol)
        else:
            # Handle inf/nan entries separately from the finite ones.
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Since we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond


if np_version < (1, 7):
    # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
    def frombuffer_empty(buf, dtype):
        if len(buf) == 0:
            return np.empty(0, dtype=dtype)
        else:
            return np.frombuffer(buf, dtype=dtype)
else:
    frombuffer_empty = np.frombuffer
if np_version < (1, 8):
    def in1d(ar1, ar2, assume_unique=False, invert=False):
        # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
        # Ravel both arrays, behavior for the first array could be different
        ar1 = np.asarray(ar1).ravel()
        ar2 = np.asarray(ar2).ravel()

        # This code is significantly faster when the condition is satisfied.
        if len(ar2) < 10 * len(ar1) ** 0.145:
            # Small ar2: element-wise comparison loop beats sorting.
            if invert:
                mask = np.ones(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask &= (ar1 != a)
            else:
                mask = np.zeros(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask |= (ar1 == a)
            return mask

        # Otherwise use sorting
        if not assume_unique:
            ar1, rev_idx = np.unique(ar1, return_inverse=True)
            ar2 = np.unique(ar2)

        ar = np.concatenate((ar1, ar2))
        # We need this to be a stable sort, so always use 'mergesort'
        # here. The values from the first array should always come before
        # the values from the second array.
        order = ar.argsort(kind='mergesort')
        sar = ar[order]
        if invert:
            bool_ar = (sar[1:] != sar[:-1])
        else:
            bool_ar = (sar[1:] == sar[:-1])
        flag = np.concatenate((bool_ar, [invert]))
        indx = order.argsort(kind='mergesort')[:len(ar1)]

        if assume_unique:
            return flag[indx]
        else:
            return flag[indx][rev_idx]
else:
    from numpy import in1d


if sp_version < (0, 15):
    # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
    from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
    from scipy.sparse.linalg import lsqr as sparse_lsqr


if sys.version_info < (2, 7, 0):
    # partial cannot be pickled in Python 2.6
    # http://bugs.python.org/issue1398
    class partial(object):
        # Minimal picklable re-implementation of functools.partial.
        def __init__(self, func, *args, **keywords):
            functools.update_wrapper(self, func)
            self.func = func
            self.args = args
            self.keywords = keywords

        def __call__(self, *args, **keywords):
            # Positional args extend the frozen ones; later keyword args
            # override the frozen keywords.
            args = self.args + args
            kwargs = self.keywords.copy()
            kwargs.update(keywords)
            return self.func(*args, **kwargs)
else:
    from functools import partial
if np_version < (1, 6, 2):
    # Allow bincount to accept empty arrays
    # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
    def bincount(x, weights=None, minlength=None):
        if len(x) > 0:
            return np.bincount(x, weights, minlength)
        else:
            if minlength is None:
                minlength = 0
            minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
            return np.zeros(minlength, dtype=np.intp)
else:
    from numpy import bincount


# os.makedirs gained `exist_ok` in Python 3.2; provide it elsewhere.
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
    makedirs = os.makedirs
else:
    def makedirs(name, mode=0o777, exist_ok=False):
        """makedirs(name [, mode=0o777][, exist_ok=False])

        Super-mkdir; create a leaf directory and all intermediate ones.  Works
        like mkdir, except that any intermediate path segment (not just the
        rightmost) will be created if it does not exist. If the target
        directory already exists, raise an OSError if exist_ok is False.
        Otherwise no exception is raised.  This is recursive.
        """
        try:
            os.makedirs(name, mode=mode)
        except OSError as e:
            # EEXIST on an existing directory is fine when exist_ok is
            # set; anything else is a real failure.
            if (not exist_ok or e.errno != errno.EEXIST
                    or not os.path.isdir(name)):
                raise
| bsd-3-clause |
lgarren/spack | var/spack/repos/builtin/packages/julia/package.py | 3 | 9966 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import sys
class Julia(Package):
    """The Julia Language: A fresh approach to technical computing"""

    homepage = "http://julialang.org"
    url = "https://github.com/JuliaLang/julia/releases/download/v0.4.3/julia-0.4.3-full.tar.gz"

    version('master',
            git='https://github.com/JuliaLang/julia.git', branch='master')
    version('release-0.5',
            git='https://github.com/JuliaLang/julia.git', branch='release-0.5')
    version('0.5.2', '8c3fff150a6f96cf0536fb3b4eaa5cbb', preferred=True)
    version('0.5.1', 'bce119b98f274e0f07ce01498c463ad5')
    version('0.5.0', 'b61385671ba74767ab452363c43131fb')
    version('release-0.4',
            git='https://github.com/JuliaLang/julia.git', branch='release-0.4')
    version('0.4.7', '75a7a7dd882b7840829d8f165e9b9078')
    version('0.4.6', 'd88db18c579049c23ab8ef427ccedf5d')
    version('0.4.5', '69141ff5aa6cee7c0ec8c85a34aa49a6')
    version('0.4.3', '8a4a59fd335b05090dd1ebefbbe5aaac')

    # Variants select which Julia packages get pre-installed in install().
    # TODO: Split these out into jl-hdf5, jl-mpi packages etc.
    variant("cxx", default=False, description="Prepare for Julia Cxx package")
    variant("hdf5", default=False, description="Install Julia HDF5 package")
    variant("mpi", default=True, description="Install Julia MPI package")
    variant("plot", default=False,
            description="Install Julia plotting packages")
    variant("python", default=False,
            description="Install Julia Python package")
    variant("simd", default=False, description="Install Julia SIMD package")

    patch('gc.patch', when='@0.4:0.4.5')
    patch('openblas.patch', when='@0.4:0.4.5')

    variant('binutils', default=sys.platform != 'darwin',
            description="Build via binutils")

    # Build-time dependencies:
    # depends_on("awk")
    depends_on("m4", type="build")
    # depends_on("pkg-config")

    # Combined build-time and run-time dependencies:
    # (Yes, these are run-time dependencies used by Julia's package manager.)
    depends_on("binutils", when='+binutils')
    depends_on("cmake @2.8:")
    depends_on("curl")
    depends_on("git", when='@:0.4')
    depends_on("git", when='@release-0.4')
    depends_on("openssl")
    depends_on("python@2.7:2.8")

    # Run-time dependencies:
    # depends_on("arpack")
    # depends_on("fftw +float")
    # depends_on("gmp")
    # depends_on("libgit")
    # depends_on("mpfr")
    # depends_on("openblas")
    # depends_on("pcre2")

    # ARPACK: Requires BLAS and LAPACK; needs to use the same version
    # as Julia.

    # BLAS and LAPACK: Julia prefers 64-bit versions on 64-bit
    # systems. OpenBLAS has an option for this; make it available as
    # variant.

    # FFTW: Something doesn't work when using a pre-installed FFTW
    # library; need to investigate.

    # GMP, MPFR: Something doesn't work when using a pre-installed
    # FFTW library; need to investigate.

    # LLVM: Julia works only with specific versions, and might require
    # patches. Thus we let Julia install its own LLVM.

    # Other possible dependencies:
    # USE_SYSTEM_OPENLIBM=0
    # USE_SYSTEM_OPENSPECFUN=0
    # USE_SYSTEM_DSFMT=0
    # USE_SYSTEM_SUITESPARSE=0
    # USE_SYSTEM_UTF8PROC=0
    # USE_SYSTEM_LIBGIT2=0

    # Run-time dependencies for Julia packages:
    depends_on("hdf5", when="+hdf5", type="run")
    depends_on("mpi", when="+mpi", type="run")
    depends_on("py-matplotlib", when="+plot", type="run")

    def install(self, spec, prefix):
        """Build and install Julia, then configure it and pre-install the
        Julia packages selected through the variants."""
        # Julia needs git tags
        if os.path.isfile(".git/shallow"):
            git = which("git")
            git("fetch", "--unshallow")
        # Explicitly setting CC, CXX, or FC breaks building libuv, one
        # of Julia's dependencies. This might be a Darwin-specific
        # problem. Given how Spack sets up compilers, Julia should
        # still use Spack's compilers, even if we don't specify them
        # explicitly.
        options = [
            # "CC=cc",
            # "CXX=c++",
            # "FC=fc",
            # "USE_SYSTEM_ARPACK=1",
            "override USE_SYSTEM_CURL=1",
            # "USE_SYSTEM_FFTW=1",
            # "USE_SYSTEM_GMP=1",
            # "USE_SYSTEM_MPFR=1",
            # "USE_SYSTEM_PCRE=1",
            "prefix=%s" % prefix]
        if "+cxx" in spec:
            if "@master" not in spec:
                raise InstallError(
                    "Variant +cxx requires the @master version of Julia")
            options += [
                "BUILD_LLVM_CLANG=1",
                "LLVM_ASSERTIONS=1",
                "USE_LLVM_SHLIB=1"]
        # Julia's build is configured through a Make.user fragment.
        with open('Make.user', 'w') as f:
            f.write('\n'.join(options) + '\n')
        make()
        make("install")

        # Julia's package manager needs a certificate
        cacert_dir = join_path(prefix, "etc", "curl")
        mkdirp(cacert_dir)
        cacert_file = join_path(cacert_dir, "cacert.pem")
        curl = which("curl")
        curl("--create-dirs",
             "--output", cacert_file,
             "https://curl.haxx.se/ca/cacert.pem")

        # Put Julia's compiler cache into a private directory
        cachedir = join_path(prefix, "var", "julia", "cache")
        mkdirp(cachedir)

        # Store Julia packages in a private directory
        pkgdir = join_path(prefix, "var", "julia", "pkg")
        mkdirp(pkgdir)

        # Configure Julia via its system-wide startup file
        with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
                  "a") as juliarc:
            if "@master" in spec or "@release-0.5" in spec or "@0.5:" in spec:
                # This is required for versions @0.5:
                juliarc.write(
                    '# Point package manager to working certificates\n')
                juliarc.write('LibGit2.set_ssl_cert_locations("%s")\n' %
                              cacert_file)
                juliarc.write('\n')
            juliarc.write('# Put compiler cache into a private directory\n')
            juliarc.write('empty!(Base.LOAD_CACHE_PATH)\n')
            juliarc.write('unshift!(Base.LOAD_CACHE_PATH, "%s")\n' % cachedir)
            juliarc.write('\n')
            juliarc.write('# Put Julia packages into a private directory\n')
            juliarc.write('ENV["JULIA_PKGDIR"] = "%s"\n' % pkgdir)
            juliarc.write('\n')

        # Install some commonly used packages
        julia = spec['julia'].command
        julia("-e", 'Pkg.init(); Pkg.update()')

        # Install HDF5
        if "+hdf5" in spec:
            with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
                      "a") as juliarc:
                juliarc.write('# HDF5\n')
                juliarc.write('push!(Libdl.DL_LOAD_PATH, "%s")\n' %
                              spec["hdf5"].prefix.lib)
                juliarc.write('\n')
            julia("-e", 'Pkg.add("HDF5"); using HDF5')
            julia("-e", 'Pkg.add("JLD"); using JLD')

        # Install MPI
        if "+mpi" in spec:
            with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
                      "a") as juliarc:
                juliarc.write('# MPI\n')
                juliarc.write('ENV["JULIA_MPI_C_COMPILER"] = "%s"\n' %
                              join_path(spec["mpi"].prefix.bin, "mpicc"))
                juliarc.write('ENV["JULIA_MPI_Fortran_COMPILER"] = "%s"\n' %
                              join_path(spec["mpi"].prefix.bin, "mpifort"))
                juliarc.write('\n')
            julia("-e", 'Pkg.add("MPI"); using MPI')

        # Install Python
        if "+python" in spec or "+plot" in spec:
            with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
                      "a") as juliarc:
                juliarc.write('# Python\n')
                juliarc.write('ENV["PYTHON"] = "%s"\n' % spec["python"].home)
                juliarc.write('\n')
            # Python's OpenSSL package installer complains:
            # Error: PREFIX too long: 166 characters, but only 128 allowed
            # Error: post-link failed for: openssl-1.0.2g-0
            julia("-e", 'Pkg.add("PyCall"); using PyCall')

        if "+plot" in spec:
            julia("-e", 'Pkg.add("PyPlot"); using PyPlot')
            julia("-e", 'Pkg.add("Colors"); using Colors')
            # These require maybe gtk and image-magick
            julia("-e", 'Pkg.add("Plots"); using Plots')
            julia("-e", 'Pkg.add("PlotRecipes"); using PlotRecipes')
            julia("-e", 'Pkg.add("UnicodePlots"); using UnicodePlots')
            julia("-e", """\
using Plots
using UnicodePlots
unicodeplots()
plot(x->sin(x)*cos(x), linspace(0, 2pi))
""")

        # Install SIMD
        if "+simd" in spec:
            julia("-e", 'Pkg.add("SIMD"); using SIMD')

        julia("-e", 'Pkg.status()')
| lgpl-2.1 |
JaviMerino/trappy | trappy/stats/Aggregator.py | 1 | 5544 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Aggregators are responsible for aggregating information
for further analysis. These aggregations can produce
both scalars and vectors and each aggregator implementation
is expected to handle its "aggregation" mechanism.
"""
from trappy.utils import listify
from trappy.stats.Indexer import MultiTriggerIndexer
from abc import ABCMeta, abstractmethod
class AbstractAggregator(object):
    """Base class shared by all aggregator implementations

    :param indexer: Indexer supplied by the child class, used for
        handling indices during correlation
    :type indexer: :mod:`trappy.stats.Indexer.Indexer`

    :param aggfunc: Function that accepts a pandas.Series and
        processes it for aggregation.
    :type aggfunc: function
    """

    __metaclass__ = ABCMeta

    # A unified index across data frames is required to account
    # for variable sampling across data frames
    def __init__(self, indexer, aggfunc=None):
        self.indexer = indexer
        self._aggfunc = aggfunc
        self._aggregated = False
        self._result = {}

    def _add_result(self, pivot, series):
        """Merge ``series`` into the stored result for the given pivot

        :param pivot: The pivot for which the result is being generated
        :type pivot: (hashable)

        :param series: series to be added to the result
        :type series: :mod:`pandas.Series`
        """
        results = self._result
        if pivot not in results:
            # Lazily create a series spanning the unified index
            results[pivot] = self.indexer.series()

        target = results[pivot]
        for index_value in series.index:
            target[index_value] = series[index_value]

    @abstractmethod
    def aggregate(self, trace_idx, **kwargs):
        """Abstract method for aggregating data for various
        pivots; implemented by subclasses.

        :param trace_idx: Index of the trace to be aggregated
        :type trace_idx: int

        :return: The aggregated result
        """
        raise NotImplementedError("Method Not Implemented")
class MultiTriggerAggregator(AbstractAggregator):
    """This aggregator accepts a list of triggers and each trigger has
    a value associated with it.  Per-node base series are generated once
    and then summed group-wise at any topological level.
    """
    def __init__(self, triggers, topology, aggfunc=None):
        """
        :param triggers: A list or a singular trigger object
        :type triggers: :mod:`trappy.stat.Trigger.Trigger`
        :param topology: A topology object for aggregation
            levels
        :type topology: :mod:`trappy.stat.Topology`
        :param aggfunc: A function to be applied on each series being aggregated.
            For each topology node, a series will be generated and this
            will be processed by the aggfunc
        :type aggfunc: function
        """
        self._triggers = triggers
        self.topology = topology
        super(
            MultiTriggerAggregator,
            self).__init__(MultiTriggerIndexer(triggers), aggfunc)

    def aggregate(self, **kwargs):
        """
        Aggregate implementation that aggregates
        triggers for a given topological level. All the arguments passed to
        it are forwarded to the aggregator function except level (if present)

        :return: A scalar or a vector aggregated result. Each group in the
            level produces an element in the result list with a one to one
            index correspondence
            ::

                groups["level"] = [[1, 2], [3, 4]]
                result = [result_1, result_2]
        """
        level = kwargs.pop("level", "all")
        # This function is a hot spot in the code. It is
        # worth considering a memoize decorator to cache
        # the function. The memoization can also be
        # maintained by the aggregator object. This will
        # help the code scale efficiently
        level_groups = self.topology.get_level(level)
        result = []
        if not self._aggregated:
            self._aggregate_base()
        for group in level_groups:
            group = listify(group)
            # BUG FIX: the first node used to call self._aggfunc
            # unconditionally, raising TypeError whenever the documented
            # default aggfunc=None was used; every node now goes through
            # _node_result, which handles the None case.
            level_res = self._node_result(group[0], **kwargs)
            for node in group[1:]:
                # Use `+` rather than `+=` so that a base series returned
                # directly by _node_result (aggfunc is None) is never
                # mutated in place.
                level_res = level_res + self._node_result(node, **kwargs)
            result.append(level_res)
        return result

    def _node_result(self, node, **kwargs):
        """Return the series for one node: the stored base series,
        optionally transformed by the user-supplied aggfunc."""
        if self._aggfunc is not None:
            return self._aggfunc(self._result[node], **kwargs)
        return self._result[node]

    def _aggregate_base(self):
        """A memoized function to generate the base series
        for each node in the flattened topology
        ::

            topo["level_1"] = [[1, 2], [3, 4]]

        This function will generate the fundamental
        aggregations for all nodes 1, 2, 3, 4 and
        store the result in ``_result``
        """
        for trigger in self._triggers:
            for node in self.topology.flatten():
                result_series = trigger.generate(node)
                self._add_result(node, result_series)
        self._aggregated = True
| apache-2.0 |
rahuldhote/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
    """Validate two bicluster sets and unpack their indicator arrays."""
    def _as_checked(indicators):
        # Indicator vectors are 1d, so 2d enforcement must be disabled.
        return check_array(indicators, ensure_2d=False)
    check_consistent_length(*a)
    check_consistent_length(*b)
    a_rows, a_cols = (_as_checked(arr) for arr in a)
    b_rows, b_cols = (_as_checked(arr) for arr in b)
    return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Build the matrix of similarities between the biclusters of two sets.

    ``result[i, j]`` holds ``similarity`` evaluated on a's bicluster ``i``
    and b's bicluster ``j``.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    result = np.array([[similarity(a_rows[i], a_cols[i],
                                   b_rows[j], b_cols[j])
                        for j in range(n_b)]
                       for i in range(n_a)])
    return result
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Pairwise similarities between the individual biclusters are computed
    first; the Hungarian algorithm then finds the best one-to-one matching
    between the two sets.  The final score is the sum of the matched
    similarities divided by the size of the larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.
    b : (rows, columns)
        Another set of biclusters like ``a``.
    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------
    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
    """
    sim_fn = _jaccard if similarity == "jaccard" else similarity
    matrix = _pairwise_similarity(a, b, sim_fn)
    # Minimizing dissimilarity is equivalent to maximizing total
    # matched similarity.
    best_match = linear_assignment(1. - matrix)
    matched_total = matrix[best_match[:, 0], best_match[:, 1]].sum()
    larger = max(len(a[0]), len(b[0]))
    return matched_total / larger
| bsd-3-clause |
DANA-Laboratory/CoolProp | wrappers/Python/CoolProp/tests/test_plots.py | 4 | 4608 | import numpy as np
import matplotlib.pyplot as plt
def test_back_compatibility():
    """Smoke-test the legacy module-level plotting API for R290.

    Every plot helper (``Ts``, ``Ph``, ``Ps``, ``PT``, ``Prho``, ``Trho``,
    ``hs``) is exercised three ways: default call, plotting into an
    explicitly supplied axis, and custom temperature limits.  The seven
    near-identical nested functions of the original were collapsed into a
    single parametrised helper; call order and arguments are unchanged.
    """
    fluid_ref = 'R290'

    def standard_plot_tests(plot_name):
        # Resolve the legacy plot helper (Ts, Ph, ...) by name.
        import CoolProp.Plots
        plot = getattr(CoolProp.Plots, plot_name)
        from matplotlib import pyplot
        # Plain call drawing into a fresh default figure.
        plot(fluid_ref, show=False)
        # Call drawing into an explicitly created axis.
        fig = pyplot.figure(2)
        ax = fig.gca()
        plot(fluid_ref, show=False, axis=ax)
        plt.close()
        # Call with custom temperature limits.
        plot(fluid_ref, show=False, Tmin=200, Tmax=300)
        plt.close()

    def Isolines_plot_tests():
        from matplotlib import pyplot
        from CoolProp.Plots import Ts, drawIsoLines
        ax = Ts(fluid_ref)
        #ax.set_xlim([-0.5, 1.5])
        #ax.set_ylim([300, 530])
        # Quality, isobar and isochore lines on top of the T-s diagram.
        quality = drawIsoLines(fluid_ref, 'Ts', 'Q', [0.3, 0.5, 0.7, 0.8], axis=ax)
        isobars = drawIsoLines(fluid_ref, 'Ts', 'P', [100, 2000], num=5, axis=ax)
        isochores = drawIsoLines(fluid_ref, 'Ts', 'D', [2, 600], num=7, axis=ax)
        pyplot.close()

    # Same order as the original per-plot test functions.
    for plot_name in ('Ts', 'Ph', 'Ps', 'PT', 'Prho', 'Trho', 'hs'):
        standard_plot_tests(plot_name)
    Isolines_plot_tests()
def test_new_code():
    """Smoke-test the object-oriented ``PropsPlot`` API for water."""
    fluid_ref = 'Water'

    def ts_plot():
        from CoolProp.Plots import PropsPlot
        PP = PropsPlot(fluid_ref, 'Ts')
        plt.close()

    def ph_plot():
        from CoolProp.Plots import PropsPlot
        PP = PropsPlot(fluid_ref, 'Ph')
        plt.close()

    def isolines_plot():
        from CoolProp.Plots import PropsPlot
        PP = PropsPlot(fluid_ref, 'Ts')
        #plt.set_axis_limits([-0.5, 1.5, 300, 530])
        # Quality, isobar and isochore lines on one T-s diagram.
        PP.draw_isolines('Q', [0.3, 0.5, 0.7, 0.8])
        PP.draw_isolines('P', [100, 2000], num=5)
        PP.draw_isolines('D', [2, 600], num=7)
        plt.close()

    def graph_annotations():
        from CoolProp.Plots import PropsPlot, IsoLines
        PP = PropsPlot(fluid_ref, 'Ts')
        PP.draw_isolines('Q', [0.3, 0.5, 0.7, 0.8])
        PP.draw_isolines('P', [100, 2000], num=5)
        PP.draw_isolines('D', [2, 600], num=7)
        # Override the default title/labels after plotting.
        plt.title('New Title')
        PP.xlabel('New x label')
        PP.ylabel('New y label')
        PP = IsoLines(fluid_ref, 'Ts', 'P')
        PP.draw_isolines([100, 2000], num=5)
        plt.close()

    def mixture_plot():
        from CoolProp.Plots import PropsPlot
        PP = PropsPlot('REFPROP-MIX:R32[0.47319469]&R125[0.2051091]&R134a[0.32169621]', 'TD')
        PP._plot_default_annotations()
        plt.close()

    ts_plot()
    ph_plot()
    isolines_plot()
    graph_annotations()
    mixture_plot()
if __name__=='__main__':
    # Running this file directly executes the test functions via nose.
    import nose
    nose.runmodule()
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/cluster/dbscan_.py | 1 | 12153 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ._dbscan_inner import dbscan_inner
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
           algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
    """Perform DBSCAN clustering from vector array or distance matrix.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.
    labels : array [n_samples]
        Cluster labels for each point. Noisy samples are given the label -1.
    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    if not eps > 0.0:
        raise ValueError("eps must be positive.")
    X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, its useless information)
    if metric == 'precomputed' and sparse.issparse(X):
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates() # XXX: modifies X's internals in-place
        # Keep only stored distances within eps.  Entries that are absent
        # from the sparse data are never considered neighbors.
        X_mask = X.data <= eps
        masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
        # For each row, the cumulative count of kept entries up to the end
        # of that row gives the split points between per-row neighborhoods.
        masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        # Shift the split points to account for the inserted diagonal entry
        # in every preceding row.
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    else:
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric, p=p,
                                           n_jobs=n_jobs)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)
    # n_neighbors[i] is the (possibly weighted) size of sample i's
    # eps-neighborhood, including sample i itself.
    if sample_weight is None:
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])
    # Initially, all samples are noise.
    labels = -np.ones(X.shape[0], dtype=np.intp)
    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    # The Cython routine expands clusters outward from core samples,
    # writing cluster ids into `labels` in place.
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
    """Perform DBSCAN clustering from vector array or distance matrix.
    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    Good for data which contains clusters of similar density.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.calculate_distance for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
        .. versionadded:: 0.17
           metric *precomputed* to accept precomputed sparse matrix.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional (default = None)
        The power of the Minkowski metric to be used to calculate distance
        between points.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    core_sample_indices_ : array, shape = [n_core_samples]
        Indices of core samples.
    components_ : array, shape = [n_core_samples, n_features]
        Copy of each core sample found by training.
    labels_ : array, shape = [n_samples]
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1.
    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 algorithm='auto', leaf_size=30, p=None, n_jobs=1):
        # Parameters are stored unmodified, per scikit-learn convention;
        # all validation happens in fit().
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs
    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features or distance matrix.
        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        """
        X = check_array(X, accept_sparse='csr')
        # Delegate the actual clustering to the functional interface,
        # forwarding every constructor parameter via get_params().
        clust = dbscan(X, sample_weight=sample_weight,
                       **self.get_params())
        self.core_sample_indices_, self.labels_ = clust
        if len(self.core_sample_indices_):
            # fix for scipy sparse indexing issue
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # no core samples
            self.components_ = np.empty((0, X.shape[1]))
        return self
    def fit_predict(self, X, y=None, sample_weight=None):
        """Performs clustering on X and returns cluster labels.
        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # fit() stores the labels; simply return them.
        self.fit(X, sample_weight=sample_weight)
        return self.labels_
| mit |
mkukielka/oddt | docs/conf.py | 1 | 12238 | # -*- coding: utf-8 -*-
#
# ODDT documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 25 13:49:30 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'numpydoc',
]
# Generate stub pages for everything listed in autosummary directives.
autosummary_generate = True
# autodoc documents members, inherited members and imported members
# by default.
autodoc_default_flags = ['members', 'inherited-members', 'imported-members']
# Class descriptions are taken from the __init__ docstring only.
autoclass_content = 'init'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Open Drug Discovery Toolkit'
copyright = u'2015, Maciej Wojcikowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The version string is taken directly from the oddt package so the docs
# can never drift from the installed code.
from oddt import __version__ as VERSION
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ODDTdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX options are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ODDT.tex', u'ODDT Documentation',
     u'Maciej Wojcikowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One manual page: (startdocname, name, description, authors, section).
man_pages = [
    ('index', 'Open Drug Discovery Toolkit', u'ODDT Documentation',
     [u'Maciej Wojcikowski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Open Drug Discovery Toolkit', u'ODDT Documentation',
     u'Maciej Wojcikowski', 'Open Drug Discovery Toolkit', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Open Drug Discovery Toolkit'
epub_author = u'Maciej Wojcikowski'
epub_publisher = u'Maciej Wojcikowski'
epub_copyright = u'2015, Maciej Wojcikowski'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Open Drug Discovery Toolkit'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
# Cross-project reference targets for the scientific Python stack.
intersphinx_mapping = {
    'sklearn': ('http://scikit-learn.org/stable', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
# Ignore some modules during documentation building on readthedocs.org
if os.environ.get('READTHEDOCS', None) == 'True':
    # Invoke sphinx-apidoc
    os.system("sphinx-apidoc -f -o rst/ ../oddt")
    try:
        # BUG FIX: ``patch`` was previously imported only in the Python 2
        # fallback branch, so ``patch.dict`` below raised NameError on
        # Python >= 3.3.  Import it from unittest.mock as well.
        from unittest.mock import patch, MagicMock  # Python 3.3+
    except ImportError:
        from mock import patch, MagicMock
    # The chemistry toolkits cannot be installed on readthedocs, so they
    # are replaced with mocks before autodoc imports oddt.
    pybel = MagicMock()
    openbabel = MagicMock()
    rdkit = MagicMock()
    modules = {
        # OpenBabel
        'pybel': pybel,
        'openbabel': openbabel,
        # RDKit
        'rdkit': rdkit,
        'rdkit.Chem': rdkit.Chem,
        'rdkit.DataStructs': rdkit.DataStructs,
        'rdkit.Chem.MACCSkeys': rdkit.Chem.MACCSkeys,
        'rdkit.Chem.AtomPairs': rdkit.Chem.AtomPairs,
        'rdkit.Chem.AtomPairs.Pairs': rdkit.Chem.AtomPairs.Pairs,
        'rdkit.Chem.AtomPairs.Torsions': rdkit.Chem.AtomPairs.Torsions,
        'rdkit.Chem.Lipinski': rdkit.Chem.Lipinski,
        'rdkit.Chem.AllChem': rdkit.Chem.AllChem,
        'rdkit.Chem.Pharm2D': rdkit.Chem.Pharm2D,
    }
    p = patch.dict('sys.modules', modules)
    p.start()
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/io/formats/test_to_excel.py | 3 | 10955 | """Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/test_excel.py
"""
import pytest
import pandas.util.testing as tm
from pandas.io.formats.css import CSSWarning
from pandas.io.formats.excel import CSSToExcelConverter
@pytest.mark.parametrize('css,expected', [
# FONT
# - name
('font-family: foo,bar', {'font': {'name': 'foo'}}),
('font-family: "foo bar",baz', {'font': {'name': 'foo bar'}}),
('font-family: foo,\nbar', {'font': {'name': 'foo'}}),
('font-family: foo, bar, baz', {'font': {'name': 'foo'}}),
('font-family: bar, foo', {'font': {'name': 'bar'}}),
('font-family: \'foo bar\', baz', {'font': {'name': 'foo bar'}}),
('font-family: \'foo \\\'bar\', baz', {'font': {'name': 'foo \'bar'}}),
('font-family: "foo \\"bar", baz', {'font': {'name': 'foo "bar'}}),
('font-family: "foo ,bar", baz', {'font': {'name': 'foo ,bar'}}),
# - family
('font-family: serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: Serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: roman, serif', {'font': {'name': 'roman', 'family': 1}}),
('font-family: roman, sans-serif', {'font': {'name': 'roman',
'family': 2}}),
('font-family: roman, sans serif', {'font': {'name': 'roman'}}),
('font-family: roman, sansserif', {'font': {'name': 'roman'}}),
('font-family: roman, cursive', {'font': {'name': 'roman', 'family': 4}}),
('font-family: roman, fantasy', {'font': {'name': 'roman', 'family': 5}}),
# - size
('font-size: 1em', {'font': {'size': 12}}),
('font-size: xx-small', {'font': {'size': 6}}),
('font-size: x-small', {'font': {'size': 7.5}}),
('font-size: small', {'font': {'size': 9.6}}),
('font-size: medium', {'font': {'size': 12}}),
('font-size: large', {'font': {'size': 13.5}}),
('font-size: x-large', {'font': {'size': 18}}),
('font-size: xx-large', {'font': {'size': 24}}),
('font-size: 50%', {'font': {'size': 6}}),
# - bold
('font-weight: 100', {'font': {'bold': False}}),
('font-weight: 200', {'font': {'bold': False}}),
('font-weight: 300', {'font': {'bold': False}}),
('font-weight: 400', {'font': {'bold': False}}),
('font-weight: normal', {'font': {'bold': False}}),
('font-weight: lighter', {'font': {'bold': False}}),
('font-weight: bold', {'font': {'bold': True}}),
('font-weight: bolder', {'font': {'bold': True}}),
('font-weight: 700', {'font': {'bold': True}}),
('font-weight: 800', {'font': {'bold': True}}),
('font-weight: 900', {'font': {'bold': True}}),
# - italic
('font-style: italic', {'font': {'italic': True}}),
('font-style: oblique', {'font': {'italic': True}}),
# - underline
('text-decoration: underline',
{'font': {'underline': 'single'}}),
('text-decoration: overline',
{}),
('text-decoration: none',
{}),
# - strike
('text-decoration: line-through',
{'font': {'strike': True}}),
('text-decoration: underline line-through',
{'font': {'strike': True, 'underline': 'single'}}),
('text-decoration: underline; text-decoration: line-through',
{'font': {'strike': True}}),
# - color
('color: red', {'font': {'color': 'FF0000'}}),
('color: #ff0000', {'font': {'color': 'FF0000'}}),
('color: #f0a', {'font': {'color': 'FF00AA'}}),
# - shadow
('text-shadow: none', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #CCC', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #999', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px', {'font': {'shadow': False}}),
('text-shadow: 2px -0em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -2em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px', {'font': {'shadow': True}}),
('text-shadow: 0px -2em', {'font': {'shadow': True}}),
# FILL
# - color, fillType
('background-color: red', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #ff0000', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #f0a', {'fill': {'fgColor': 'FF00AA',
'patternType': 'solid'}}),
# BORDER
# - style
('border-style: solid',
{'border': {'top': {'style': 'medium'},
'bottom': {'style': 'medium'},
'left': {'style': 'medium'},
'right': {'style': 'medium'}}}),
('border-style: solid; border-width: thin',
{'border': {'top': {'style': 'thin'},
'bottom': {'style': 'thin'},
'left': {'style': 'thin'},
'right': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: thin',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: 1pt',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: medium',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: 2pt',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: thick',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: solid; border-top-width: 4pt',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: dotted',
{'border': {'top': {'style': 'mediumDashDotDot'}}}),
('border-top-style: dotted; border-top-width: thin',
{'border': {'top': {'style': 'dotted'}}}),
('border-top-style: dashed',
{'border': {'top': {'style': 'mediumDashed'}}}),
('border-top-style: dashed; border-top-width: thin',
{'border': {'top': {'style': 'dashed'}}}),
('border-top-style: double',
{'border': {'top': {'style': 'double'}}}),
# - color
('border-style: solid; border-color: #0000ff',
{'border': {'top': {'style': 'medium', 'color': '0000FF'},
'right': {'style': 'medium', 'color': '0000FF'},
'bottom': {'style': 'medium', 'color': '0000FF'},
'left': {'style': 'medium', 'color': '0000FF'}}}),
('border-top-style: double; border-top-color: blue',
{'border': {'top': {'style': 'double', 'color': '0000FF'}}}),
('border-top-style: solid; border-top-color: #06c',
{'border': {'top': {'style': 'medium', 'color': '0066CC'}}}),
# ALIGNMENT
# - horizontal
('text-align: center',
{'alignment': {'horizontal': 'center'}}),
('text-align: left',
{'alignment': {'horizontal': 'left'}}),
('text-align: right',
{'alignment': {'horizontal': 'right'}}),
('text-align: justify',
{'alignment': {'horizontal': 'justify'}}),
# - vertical
('vertical-align: top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: text-top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: middle',
{'alignment': {'vertical': 'center'}}),
('vertical-align: bottom',
{'alignment': {'vertical': 'bottom'}}),
('vertical-align: text-bottom',
{'alignment': {'vertical': 'bottom'}}),
# - wrap_text
('white-space: nowrap',
{'alignment': {'wrap_text': False}}),
('white-space: pre',
{'alignment': {'wrap_text': False}}),
('white-space: pre-line',
{'alignment': {'wrap_text': False}}),
('white-space: normal',
{'alignment': {'wrap_text': True}}),
# NUMBER FORMAT
('number-format: 0%',
{'number_format': {'format_code': '0%'}}),
])
def test_css_to_excel(css, expected):
    """Each CSS declaration should map onto the expected Excel style dict."""
    converter = CSSToExcelConverter()
    actual = converter(css)
    assert actual == expected
def test_css_to_excel_multiple():
    """Several CSS declarations at once should merge into one style dict."""
    converter = CSSToExcelConverter()
    result = converter('''
          font-weight: bold;
          text-decoration: underline;
          color: red;
          border-width: thin;
          text-align: center;
          vertical-align: top;
          unused: something;
    ''')
    thin = {"style": "thin"}
    expected = {
        "font": {"bold": True, "underline": "single", "color": "FF0000"},
        "border": {"top": thin, "right": thin, "bottom": thin, "left": thin},
        "alignment": {"horizontal": "center", "vertical": "top"},
    }
    assert result == expected
@pytest.mark.parametrize('css,inherited,expected', [
('font-weight: bold', '',
{'font': {'bold': True}}),
('', 'font-weight: bold',
{'font': {'bold': True}}),
('font-weight: bold', 'font-style: italic',
{'font': {'bold': True, 'italic': True}}),
('font-style: normal', 'font-style: italic',
{'font': {'italic': False}}),
('font-style: inherit', '', {}),
('font-style: normal; font-style: inherit', 'font-style: italic',
{'font': {'italic': True}}),
])
def test_css_to_excel_inherited(css, inherited, expected):
    """Inherited CSS is merged in but overridden by the element's own rules."""
    converter = CSSToExcelConverter(inherited)
    result = converter(css)
    assert result == expected
@pytest.mark.parametrize("input_color,output_color", (
[(name, rgb) for name, rgb in CSSToExcelConverter.NAMED_COLORS.items()] +
[("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()] +
[("#F0F", "FF00FF"), ("#ABC", "AABBCC")])
)
def test_css_to_excel_good_colors(input_color, output_color):
    """Valid named/hex colors convert to RGB hex without warnings.

    see gh-18392
    """
    properties = ["border-top-color", "border-right-color",
                  "border-bottom-color", "border-left-color",
                  "background-color", "color"]
    # Builds the same declaration string as chained str.format calls would.
    css = "; ".join("{}: {}".format(prop, input_color) for prop in properties)
    expected = {
        "fill": {"patternType": "solid", "fgColor": output_color},
        "font": {"color": output_color},
        "border": {side: {"color": output_color}
                   for side in ("top", "right", "bottom", "left")},
    }
    with tm.assert_produces_warning(None):
        converter = CSSToExcelConverter()
        assert converter(css) == expected
@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
    """Invalid colors produce a CSSWarning and are dropped from the style.

    see gh-18392
    """
    properties = ["border-top-color", "border-right-color",
                  "border-bottom-color", "border-left-color",
                  "background-color", "color"]
    css = "; ".join("{}: {}".format(prop, input_color) for prop in properties)
    # A present-but-invalid color still sets the fill pattern; a missing
    # (None) color yields an empty style.
    if input_color is None:
        expected = {}
    else:
        expected = {"fill": {"patternType": "solid"}}
    with tm.assert_produces_warning(CSSWarning):
        converter = CSSToExcelConverter()
        assert converter(css) == expected
| bsd-3-clause |
fyffyt/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10  # Monte-Carlo repetitions per contamination level
# Contamination levels to explore: dense sampling up to n_samples/8, then
# coarser sampling up to n_samples/2.
# NOTE(review): np.linspace returns floats; these counts are later used as
# slice bounds -- confirm they are cast to int before indexing on Python 3.
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results (rows: contamination level,
# columns: repetition)
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation: for every contamination level, repeat the experiment with a
# fresh seeded RNG and record estimation errors for each estimator.
for i, n_outliers in enumerate(range_n_outliers):
    # BUG FIX: np.linspace yields floats; slicing and randint sizes below
    # require an integer count (TypeError on Python 3 otherwise).
    n_outliers = int(n_outliers)
    for j in range(repeat):
        rng = np.random.RandomState(i * j)
        # generate data
        X = rng.randn(n_samples, n_features)
        # add some outliers
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        # BUG FIX: use the seeded ``rng`` (not the global np.random) so the
        # run is reproducible, as the RandomState seeding above intends.
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False
        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))
        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results: top panel shows location-estimation error, bottom panel
# shows covariance-estimation error, both vs. number of outliers.
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
             yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
             label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
             yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
             label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
             yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
             label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
             yerr=err_cov_mcd.std(1),
             label="Robust covariance (mcd)", color='m')
# BUG FIX: use integer floor division (//). On Python 3 plain `/` yields a
# float and slice indices must be integers (TypeError).
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
             yerr=err_cov_emp_pure.std(1),
             label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
dmargala/lyabao | bin/inspect_deltas.py | 1 | 9353 | #!/usr/bin/env python
import argparse
from itertools import chain
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import h5py
from tqdm import tqdm
from uniform_grid import get_fiducial_pixel_index_offset
from uniform_grid import get_fiducial_wavelength
def main():
    """Read per-sightline delta fields from an HDF5 file, accumulate pixel
    statistics, fit a model for the LSS inverse variance per redshift slice,
    and write a set of diagnostic plots.

    NOTE(review): this script uses ``lines_of_sight.keys()[:n]`` slicing and
    ``np.float`` -- Python 2 / old-NumPy idioms; confirm the target runtime
    before porting.
    """
    # parse command-line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ## targets to fit
    parser.add_argument('--name', type=str, default=None,
        help='base filename to use for input and output')
    parser.add_argument('--max-rows', type=int, default=0,
        help='max rows')
    parser.add_argument('--max-ivar', type=float, default=200,
        help='maximum pipeline ivar for binning')
    parser.add_argument('--min-ivar', type=float, default=0,
        help='minimum pipeline ivar for binning')
    parser.add_argument('--num-ivar', type=int, default=50,
        help='number of ivar bins for lss measurement')
    parser.add_argument('--max-z', type=float, default=3.4,
        help='maximum redshift for binning')
    parser.add_argument('--min-z', type=float, default=2.0,
        help='minimum redshift for binning')
    parser.add_argument('--num-z', type=int, default=7,
        help='number of redshift bins for lss var measurement')
    parser.add_argument('--max-delta', type=float, default=1e3,
        help='filter pixels where delta is >= this value')
    args = parser.parse_args()
    # import data
    infile = h5py.File(args.name + '-delta.hdf5', 'r')
    lines_of_sight = infile['lines_of_sight']
    # read attributes with info on processing so far
    coeff0 = infile.attrs['coeff0']
    coeff1 = infile.attrs['coeff1']
    num_wave_obs = infile.attrs['max_fid_index']
    # older files lack the Lya rest wavelength attribute; fall back to 1216 A
    try:
        wave_lya = infile.attrs['wave_lya']
    except KeyError:
        wave_lya = 1216.0
    num_sightlines = len(lines_of_sight.keys())
    # if specified, only use max_rows number of targets
    if args.max_rows > 0:
        num_sightlines = args.max_rows
    print('num sightlines: {:d}'.format(num_sightlines))
    # loop over targets, collecting valid pixels per sightline
    waves = []
    deltas = []
    ivars = []
    weights = []
    thing_ids = lines_of_sight.keys()[:num_sightlines]
    for i, thing_id in tqdm(enumerate(thing_ids), total=num_sightlines):
        los = lines_of_sight[thing_id]
        z = los.attrs['z']
        loglam = los['loglam'].value
        delta = los['delta'].value
        # if np.any(np.abs(delta) > 1e3):
        #     # print thing_id, los.attrs['plate'], los.attrs['mjd'], los.attrs['fiber']
        #     continue
        ivar = los['ivar'].value
        weight = los['weight'].value
        # keep pixels with positive ivar and |delta| below the cutoff
        valid = (ivar > 0) & (np.abs(delta) < args.max_delta)
        waves.append(np.power(10, loglam[valid]))
        deltas.append(delta[valid])
        ivars.append(ivar[valid])
        weights.append(weight[valid])
    infile.close()
    # concatenate all pixels into flat arrays
    all_waves = np.fromiter(chain.from_iterable(waves), np.float)
    all_deltas = np.fromiter(chain.from_iterable(deltas), np.float)
    all_ivars = np.fromiter(chain.from_iterable(ivars), np.float)
    all_weights = np.fromiter(chain.from_iterable(weights), np.float)
    print('Total number of pixels: {:d}'.format(len(all_waves)))
    # set up binning for observed frame stats
    # subtract 0.5 so that these are bin edges instead of bin centers
    wave_bins = get_fiducial_wavelength(
        np.arange(num_wave_obs + 1) - 0.5, coeff1=coeff1)
    wave_bin_centers = get_fiducial_wavelength(
        np.arange(num_wave_obs), coeff1=coeff1)
    # determine observed wavelength bin indices
    wave_bin_indices = np.digitize(all_waves, wave_bins)
    # calculate stats as a function of observed wavelength
    counts_per_obs_pixel = ma.masked_all(num_wave_obs)
    mean_delta = ma.masked_all(num_wave_obs)
    wmean_delta = ma.masked_all(num_wave_obs)
    for i in np.unique(wave_bin_indices):
        i_indices = wave_bin_indices == i
        # digitize returns 1-based bin numbers; skip overflow bins
        if i - 1 >= num_wave_obs:
            continue
        counts_per_obs_pixel[i - 1] = ma.count(all_waves[i_indices])
        mean_delta[i - 1] = ma.average(all_deltas[i_indices])
        wmean_delta[i - 1] = ma.average(
            all_deltas[i_indices], weights=all_weights[i_indices]
        )
    # set up binning for lss variance measurement
    ivar_bin_edges = np.linspace(
        args.min_ivar, args.max_ivar, args.num_ivar + 1)
    ivar_bin_centers = 0.5 * (ivar_bin_edges[:-1] + ivar_bin_edges[1:])
    # we want to perform this measurement in redshift slices
    z_bin_edges = np.linspace(args.min_z, args.max_z, args.num_z+1)
    z_bin_centers = 0.5 * (z_bin_edges[:-1] + z_bin_edges[1:])
    all_redshifts = all_waves/wave_lya - 1.0
    # masked accumulators: entries stay masked until a bin is populated
    ivar_delta_per_ivar = ma.zeros((args.num_z, args.num_ivar))
    ivar_delta_per_ivar.mask = True
    counts_per_ivar = ma.zeros((args.num_z, args.num_ivar))
    counts_per_ivar.mask = True
    for j in range(args.num_z):
        # select pixels for the current redshift interval
        z_slice = (
            (all_redshifts >= z_bin_edges[j]) &
            (all_redshifts < z_bin_edges[j+1])
        )
        # determine pipeline ivar bins
        ivar_bin_indices = np.digitize(all_ivars[z_slice], ivar_bin_edges)
        # compute stats per ivar bin
        for i in np.unique(ivar_bin_indices):
            # skip ivar bins outside our grid
            if i - 1 >= args.num_ivar:
                continue
            ivar_slice = ivar_bin_indices == i
            # ivar-weighted mean and variance of delta within the bin
            mean = ma.average(
                all_deltas[z_slice][ivar_slice],
                weights=all_ivars[z_slice][ivar_slice])
            var = ma.average(
                (all_deltas[z_slice][ivar_slice]-mean)**2,
                weights=all_ivars[z_slice][ivar_slice])
            if var > 0:
                ivar_delta_per_ivar[j, i - 1] = 1.0/var
            counts_per_ivar[j, i - 1] = ma.count(all_deltas[z_slice][ivar_slice])
    print('Fitting lss ivar...')
    def lss_ivar_chisq(p):
        # model: 1/ivar_delta = p_pipe/ivar + p_lss, fit per redshift slice;
        # p packs the per-slice pipeline terms first, then the lss terms.
        ivar_model = (
            p[:args.num_z, np.newaxis]/ivar_bin_centers[np.newaxis, :] +
            p[args.num_z:, np.newaxis]
        )
        dy = 1.0/ivar_delta_per_ivar - ivar_model
        return ma.average(dy*dy, weights=1.0/counts_per_ivar)
    p0 = np.concatenate([np.ones(args.num_z), np.zeros(args.num_z)])
    result = minimize(lss_ivar_chisq, p0)
    print(result.x)
    # create figures
    savefig_kwargs = {
        'bbox_inches': 'tight',
        'dpi': 100
    }
    # per-redshift-slice inverse delta variance vs pipeline ivar, with fit
    plt.figure(figsize=(12, 9))
    for i in range(args.num_z):
        p, = plt.plot(
            ivar_bin_centers, ivar_delta_per_ivar[i],
            marker='o', label=z_bin_centers[i], lw=0
        )
        plt.plot(
            ivar_bin_centers,
            1.0/(result.x[i]/ivar_bin_centers + result.x[args.num_z + i]),
            color=p.get_color()
        )
    plt.xlim(ivar_bin_edges[0], ivar_bin_edges[-1])
    plt.xlabel('Pipeline ivar')
    plt.ylim(0, 30)
    plt.ylabel('Inverse Delta Variance')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.savefig(args.name + '-delta-ivar.png', **savefig_kwargs)
    # mean delta vs observed wavelength
    plt.figure(figsize=(12, 9))
    plt.plot(wave_bin_centers, mean_delta,
             marker='+', lw=0, label='Mean')
    plt.plot(wave_bin_centers, wmean_delta,
             marker='+', lw=0, label='Weighted Mean')
    plt.xlim(wave_bins[0], wave_bins[-1])
    plt.ylim(-2, 2)
    plt.xlabel('Observed Wavelength')
    plt.ylabel('Mean Delta')
    plt.legend()
    plt.savefig(args.name + '-delta-mean-obs.png', **savefig_kwargs)
    # pixel counts per observed-wavelength bin
    plt.figure(figsize=(12, 9))
    plt.scatter(wave_bin_centers, counts_per_obs_pixel)
    plt.xlim(wave_bins[0], wave_bins[-1])
    plt.xlabel('Observed Wavelength')
    plt.ylabel('Counts per obs pixel')
    plt.savefig(args.name + '-wave-hist-obs.png', **savefig_kwargs)
    plt.figure(figsize=(12, 9))
    plt.hist(all_waves, bins=wave_bins, histtype='step')
    plt.xlim(wave_bins[0], wave_bins[-1])
    plt.xlabel('Observed Wavelength')
    plt.ylabel('Num pixels')
    plt.savefig(args.name + '-wave-hist.png', **savefig_kwargs)
    # scatter diagnostics of delta/ivar/weight distributions
    plt.figure(figsize=(12, 9))
    plt.scatter(all_waves, all_deltas, marker=',', lw=0, s=1)
    plt.xlim(wave_bins[0], wave_bins[-1])
    plt.xlabel('Observed Wavelength')
    plt.savefig(args.name + '-delta-scatter.png', **savefig_kwargs)
    plt.figure(figsize=(12, 9))
    plt.scatter(all_waves, all_ivars, marker=',', lw=0, s=1)
    plt.xlim(wave_bins[0], wave_bins[-1])
    plt.xlabel('Observed Wavelength')
    plt.savefig(args.name + '-ivar-scatter.png', **savefig_kwargs)
    plt.figure(figsize=(12, 9))
    plt.scatter(all_ivars, np.abs(all_deltas), marker=',', lw=0, s=1)
    plt.xscale('log')
    plt.yscale('log')
    plt.ylim(1e-8, 1e18)
    plt.xlim(1e-18, 1e4)
    plt.xlabel('ivar')
    plt.ylabel('delta')
    plt.savefig(args.name + '-ivar-delta-scatter.png', **savefig_kwargs)
    plt.figure(figsize=(12, 9))
    plt.scatter(all_weights, np.abs(all_deltas), marker=',', lw=0, s=1)
    plt.xscale('log')
    plt.yscale('log')
    plt.ylim(1e-8, 1e18)
    plt.xlim(1e-18, 1e4)
    plt.xlabel('weight')
    plt.ylabel('delta')
    plt.savefig(args.name + '-weight-delta-scatter.png', **savefig_kwargs)
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
joernhees/scikit-learn | examples/svm/plot_iris.py | 65 | 3742 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
    """Create a rectangular mesh covering the data with a one-unit margin.

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional

    Returns
    -------
    xx, yy : ndarray
    """
    x_axis = np.arange(x.min() - 1, x.max() + 1, h)
    y_axis = np.arange(y.min() - 1, y.max() + 1, h)
    return np.meshgrid(x_axis, y_axis)
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, C=C))
# generator: each classifier is fitted lazily when consumed by zip() below
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
# one panel per classifier: decision surface plus the training points
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instanciate a GP model: anisotropic absolute-exponential correlation
# (one theta per feature, 10 features) with a nugget for noisy targets.
# NOTE(review): sklearn.gaussian_process.GaussianProcess and the
# cross_validation module are legacy APIs removed in modern scikit-learn.
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_  # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (n_jobs=1: single process)
K = 20  # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
      % (K, R2))
| bsd-3-clause |
asford/depth | bin/binding_site_prediction_app.py | 1 | 8481 | # This is part of DEPTH.
# DEPTH (Version: 2.0) computes the closest distance of a residue/atom to bulk solvent and predicts small molecule binding site of a protein.
# Copyright (C) 2013, Kuan Pern Tan, Nguyen Thanh Binh, Raghavan Varadarajan and M.S. Madhusudhan
#
# DEPTH is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# DEPTH is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with DEPTH. If not, see <http://www.gnu.org/licenses/>.
# this script compute site and write the output into a formatted html file
from apps_backend import *
from numpy import *
import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import commands
import sys
def predict_binding_site(in_site_parameters, out_site_parameters, job_parameters):
	"""Run the DEPTH binding-site prediction pipeline for one job and write
	the formatted HTML result fragments.

	Parameters: in_site_parameters - prediction thresholds/solvation settings;
	out_site_parameters - (render Jmol applet?, include depth/ASA output?);
	job_parameters - paths and names describing the job environment.
	Returns [content_html_path, navigation_html_path].
	NOTE(review): relies on the Python 2 ``commands`` module and shells out
	via string-joined commands built from the inputs.
	"""
	# get input
	gpdb_name, workdir, exedir, pka_exedir, cavity_exedir, jmol_dir, home_dir, home_name = job_parameters
	pdb_name = gpdb_name.split('/')[-1]
	depth_exe = exedir+'/DEPTH'
	ASA_exe = exedir+'/ASA'
	hotspot_exe = exedir+'/predict-binding-site'
	distmatrix_exe = exedir+'/distmatrix_self'
	bin_dir = exedir
	# output file names
	cavity_pred_out = pdb_name + '.pred'
	cavity_pred_pdb = pdb_name + '.pred.pdb'
	cavity_site_pdb = pdb_name + '.binary.pdb'
	# input parameters
	in_site_threshold, in_site_cavity_size, in_site_iteration_sol, in_site_dist_sol, in_site_iteration_depth, in_site_solN, in_site_resolution, in_site_probe_radius, use_msa = in_site_parameters
	# output parameters
	out_site_jmol, out_site_depth_asa = out_site_parameters
	# run predict binding site program
	use_msa = str(int(use_msa))
	# NOTE(review): other exe paths use exedir+'/NAME' but this one is
	# exedir+ 'predict_site.py' with no slash -- confirm exedir ends in '/'.
	inputs = ['cd', workdir, ';', 'python', exedir+ 'predict_site.py', pdb_name, depth_exe, ASA_exe, hotspot_exe, bin_dir, distmatrix_exe, in_site_threshold, in_site_cavity_size, cavity_pred_pdb,
	cavity_pred_out, bin_dir, in_site_iteration_sol, in_site_dist_sol, in_site_iteration_depth, in_site_solN, in_site_resolution, in_site_probe_radius, use_msa]
	cmd = ' '.join(inputs)
	fout = open(gpdb_name+'.err', 'w')
	fout.writelines(cmd)
	commands.getoutput(cmd)
	# Generate output
	# generate navigation list for requested outputs
	B = bookmarks()
	Content = []
	link_top = 'top' # hard-coding for top bookmark here !
	link1, link2, link3 = [cavity_pred_pdb, cavity_pred_out, cavity_site_pdb]
	# extract the predicted binding residues from the binary PDB output
	inputs = ['cd', workdir, ';', 'python', exedir+'get_binding_residues.py', cavity_site_pdb]
	cmd = ' '.join(inputs)
	fout.writelines('\n'+cmd+'\n')
	binding_residues = commands.getoutput(cmd)
	fout.writelines(binding_residues+'\n')
	fout.close()
	# recommended_threshold = {2:0.36, 3:0.315, 4:0.31, 5:0.33}
	# recommended_threshold = {2:0.36, 3:0.315, 4:0.50, 5:0.45}
	# thresholds = [recommended_threshold[int(float(in_site_solN))], float(in_site_threshold)]
	# labels = ['Recommended Threshold', 'User Defined Threshold']
	# hlines = [thresholds, labels]
	img1 = plot_site_prediction(workdir+'/'+cavity_pred_out)#, hlines)
	# generate jmol script
	pdbfname = cavity_site_pdb.split('/')[-1]
	script1 = gen_site_jmol_script(pdbfname, "../../jmol-12.0.22/")
	B.add('out_site_result', 'BINDING SITE PREDICTION')
	# with or without the embedded Jmol applet, per user request
	if out_site_jmol == True:
		Content.append(site_output_II('BINDING SITE PREDICTION', 'out_site_result', binding_residues, link_top, link1, link2, link3, img1, script1, [home_dir, home_name]))
	else:
		Content.append(site_output_I ('BINDING SITE PREDICTION', 'out_site_result', binding_residues, link_top, link1, link2, link3, img1, [home_dir, home_name]))
	# end if
	# generate content and write to html file
	Navigator = '\n'.join(B.generate())+'\n\n'
	Content = '\n\n'.join(Content)
	site_content_outfile = gpdb_name+'.site-content.html'
	fout = open(site_content_outfile, 'w')
	fout.writelines(Content)
	fout.close()
	site_navigation_outfile = gpdb_name+'.site-navigation.html'
	fout = open(site_navigation_outfile, 'w')
	fout.writelines(Navigator)
	fout.close()
	site_outfiles = [site_content_outfile, site_navigation_outfile]
	return site_outfiles
# end def
def plot_site_prediction(cavity_pred_out, hlines = None):
    """Render the per-residue binding-site probabilities as a PNG plot.

    cavity_pred_out -- path to the tab-delimited prediction table
    hlines          -- optional horizontal threshold lines forwarded to the
                       plotting helper (currently unused by the caller)
    Returns the path of the PNG file that was written.
    """
    # Load the table, dropping the header row; columns are (residue, prob).
    residue_label, P = zip(*read_table(cavity_pred_out)[1:])
    # Workaround for an upstream bug: treat NaN probabilities as zero.
    P = [0 if isnan(p) else p for p in P]
    plot_name = cavity_pred_out + '.site_prediction.png'
    plot_type_II(P,
                 residue_label,
                 hlines,
                 "Probability of Residue Forming a Binding Site",
                 'Residue No.',
                 r'Probability ($\AA$)',
                 plot_name,
                 xlim='auto',
                 ylim='auto')
    return plot_name
# end def
def gen_site_jmol_script(pdbfname, jmol_dir):
    """Return an HTML/JavaScript snippet embedding a Jmol applet.

    The applet loads *pdbfname* and renders a molecular surface coloured by
    the temperature column (blue-white-red scheme). *jmol_dir* is the
    web-visible directory holding Jmol.js and JmolApplet.jar.
    """
    html = [
        '<script type="text/javascript" language="JavaScript" src="' + jmol_dir + 'Jmol.js"></script>',
        '<script>',
        '    jmolInitialize("' + jmol_dir + '", "JmolApplet.jar")',
        "    jmolSetAppletColor('white')",
        "    jmolApplet(500, 'load " + pdbfname + "; cpk off; frame all; cpk off; wireframe off; spacefill off; cartoon off ; isosurface surf molecular colorscheme bwr property temperature ', 'cavity')",
        '    jmolBr();',
        '</script>',
    ]
    return '\n'.join(html)
# end def
def site_output_I(title, bookmark, binding_residues, link_top, link1, link2, link3, img1, dir_des): # html output table for other site
    """Build the static (no Jmol viewer) HTML results section.

    Download links and the plot image path are rewritten from the server
    filesystem prefix (home_dir) to the web-visible prefix (home_name).
    Returns the assembled HTML as a single newline-joined string.
    """
    home_dir, home_name = dir_des
    # Rewrite absolute server paths into web-visible URLs.
    link1, link2, link3, img1 = [p.replace(home_dir, home_name)
                                 for p in (link1, link2, link3, img1)]
    html = [
        '<table> <a name="' + bookmark + '"></a>',
        '    <h4>' + title + '</h4>',
        '    <tbody> <tr><td> download prediction output in <a href="' + link3 + '"> PDB </a> format </td></tr>',
        '    <tr><td> download probability output in <a href="' + link1 + '"> PDB </a> or <a href="' + link2 + '">tab-delimited</a> format ',
        '    </td></tr> <tr><td>',
        '    <table>',
        '        <tr><td> <table border="1"> <tr><td> <img src ="' + img1 + '"></img> </td></tr></table> </td></tr>',
        '    </table> ',
        '    <tr><td> <div> <h4> Predicted Binding Residues </h4><br> ' + binding_residues + ' </div></td></tr>',
        '    </td></tr> <tr><td align="right">',
        '    <a href=#' + link_top + '>[back to top]</a>',
        '    </td></tr>',
        '    </tbody>',
        '</table>',
    ]
    return '\n'.join(html)
# end def
def site_output_II(title, bookmark, binding_residues, link_top, link1, link2, link3, img1, script1, dir_des): # html output table for other site
    """Build the HTML results section with an embedded Jmol viewer.

    *script1* is the pre-rendered Jmol applet snippet; the remaining links
    and the image path are rewritten from the server filesystem prefix
    (home_dir) to the web-visible prefix (home_name).
    """
    home_dir, home_name = dir_des
    # Rewrite absolute server paths into web-visible URLs.
    link1, link2, link3, img1 = [p.replace(home_dir, home_name)
                                 for p in (link1, link2, link3, img1)]
    html = [
        '<table> <a name="' + bookmark + '"></a>',
        '    <h4>' + title + '</h4>',
        '    <tbody> <tr><td> download prediction output in <a href="' + link3 + '"> PDB </a> format </td></tr>',
        '    <tr><td> download probability output in <a href="' + link1 + '"> PDB </a> or <a href="' + link2 + '">tab-delimited</a> format ',
        '    </td></tr> <tr><td>',
        '    <tr><td> <div style="float:left; display:block"><table border="1"> <tr><td> ' + script1 + ' </td></tr>',
        '    <tr><td>protein surface: <font color="blue"> blue </font> <br> Residues lining binding cavities: <font color="red">Red</font></td></tr>',
        '    </table></div>',
        '    <div style="float:left; display:block"><img src ="' + img1 + '"></img></div>',
        '    <tr><td> <div style="border-style:solid"> <p><b> Predicted Binding Residues </b></p> <p>' + binding_residues + ' </p> </div></td></tr>',
        '    </table> ',
        '    </td></tr> <tr><td align="right">',
        '    <a href=#' + link_top + '>[back to top]</a>',
        '    </td></tr>',
        '    </tbody>',
        '</table>',
    ]
    return '\n'.join(html)
# end def
| gpl-3.0 |
bikong2/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results: wall-clock seconds per iteration, appended by the
# bench_* helpers below and plotted in __main__.
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6  # number of microseconds in a second (float, so the division below is exact)
def bench_scikit_tree_classifier(X, Y):
    """Time a fit+predict cycle of a scikit-learn decision tree classifier.

    Appends the elapsed wall-clock time (in seconds) to the module-level
    ``scikit_classifier_results`` list.
    """
    from sklearn.tree import DecisionTreeClassifier

    # Collect garbage first so earlier allocations do not skew the timing.
    gc.collect()

    started = datetime.now()
    classifier = DecisionTreeClassifier()
    classifier.fit(X, Y).predict(X)
    elapsed = datetime.now() - started

    scikit_classifier_results.append(
        elapsed.seconds + elapsed.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
    """Time a fit+predict cycle of a scikit-learn decision tree regressor.

    Appends the elapsed wall-clock time (in seconds) to the module-level
    ``scikit_regressor_results`` list.
    """
    from sklearn.tree import DecisionTreeRegressor

    # Collect garbage first so earlier allocations do not skew the timing.
    gc.collect()

    started = datetime.now()
    regressor = DecisionTreeRegressor()
    regressor.fit(X, Y).predict(X)
    elapsed = datetime.now() - started

    scikit_regressor_results.append(
        elapsed.seconds + elapsed.microseconds / mu_second)
if __name__ == '__main__':
    print('============================================')
    print('Warning: this is going to take a looong time')
    print('============================================')

    n = 10
    step = 10000
    n_samples = 10000
    dim = 10
    n_classes = 10
    # Benchmark 1: fixed dimensionality (10), growing sample count.
    for i in range(n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        n_samples += step
        X = np.random.randn(n_samples, dim)
        Y = np.random.randint(0, n_classes, (n_samples,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(n_samples)
        bench_scikit_tree_regressor(X, Y)

    # NOTE(review): xx starts at 0 but the first measured run used
    # n_samples = 20000 -- confirm the intended x-axis alignment.
    xx = range(0, n * step, step)
    pl.figure('scikit-learn tree benchmark results')
    pl.subplot(211)
    pl.title('Learning with varying number of samples')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')

    # Benchmark 2: fixed sample count (100), growing dimensionality.
    # Reset the accumulators so the second plot starts fresh.
    scikit_classifier_results = []
    scikit_regressor_results = []
    n = 10
    step = 500
    start_dim = 500
    n_classes = 10

    dim = start_dim
    for i in range(0, n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        dim += step
        X = np.random.randn(100, dim)
        Y = np.random.randint(0, n_classes, (100,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(100)
        bench_scikit_tree_regressor(X, Y)

    xx = np.arange(start_dim, start_dim + n * step, step)
    pl.subplot(212)
    pl.title('Learning in high dimensional spaces')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of dimensions')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/tests/test_metaestimators.py | 57 | 4958 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
    """Description of one delegating meta-estimator to exercise.

    name: display name used in assertion failure messages.
    construct: callable that wraps a sub-estimator in the meta-estimator.
    skip_methods: delegated methods this wrapper is known not to forward,
        excluded from the delegation checks.
    fit_args: positional arguments handed to ``fit``.
    """

    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        self.name = name
        self.construct = construct
        self.skip_methods = skip_methods
        self.fit_args = fit_args
# Meta-estimators that delegate attribute access to a wrapped sub-estimator.
# `skip_methods` lists delegated methods each wrapper is known not to forward
# (e.g. because it defines its own version).
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba',
                                'predict'])
]
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        # Decorator: expose `method` via a property that raises AttributeError
        # when this instance was configured to hide it, so hasattr() is False.
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper

    class SubEstimator(BaseEstimator):
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method

        def fit(self, X, y=None, *args, **kwargs):
            self.coef_ = np.arange(X.shape[1])
            return True

        def _check_fit(self):
            # Raise unless fit() has populated coef_.
            if not hasattr(self, 'coef_'):
                raise RuntimeError('Estimator is not fit')

        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X

        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X

        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0

    # All public, non-fit methods of the sub-estimator, in stable order.
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()

    for delegator_data in DELEGATING_METAESTIMATORS:
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises an exception
            assert_raises(Exception, getattr(delegator, method),
                          delegator_data.fit_args[0])

        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])

        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # Hiding a method on the delegate must hide it on the delegator.
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
| bsd-3-clause |
alextag/Twitter-Sentiment-Analysis | parse_dataset.py | 2 | 5576 | import sys
import string
import numpy as np
from pandas import read_csv
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
STOP_WORDS = np.array([])  # populated by load_stopwords()
FILENAME = 'training_data.csv' # the training data csv
RANDOM_SEED = 1337  # fixed seed so train/test/validation splits are reproducible
def load_stopwords():
    """Loads the stopwords.txt file into the module-level STOP_WORDS array"""
    global STOP_WORDS
    print('loading stopwords')
    with open('stopwords.txt') as stopword_file:
        STOP_WORDS = np.array(stopword_file.read().splitlines())
    print('loaded stopwords')
def load_csv():
    """
    Loads the csv file
    Returns:
        corpus: An array version of the FILENAME
    """
    print('reading from csv')
    frame = read_csv(FILENAME)
    print('read from csv')
    return frame
def parse_corpus(corpus):
    """
    Parses the corpus and returns the inputs and targets
    Returns:
        inputs: A numpy array of the tweets
        targets: A numpy array of the sentiment, 1 for positive, 0 for negative
    """
    print('parsing corpus')
    # The CSV has no header row; name the six columns so they can be selected.
    corpus.columns = ["sentiment", "2", "3", "4", "5", "tweet"]
    tweets = np.array(corpus["tweet"])
    sentiments = np.array(corpus["sentiment"])
    print('parsed corpus into numpy arrays')
    return tweets, sentiments
def remove_neutral_tweets(inputs, targets):
    """
    Drops tweets whose sentiment label is neutral (target == 2).
    Returns:
        inputs: A numpy array of the tweets
        targets: A numpy array of the sentiment, 1 for positive, 0 for negative

    Bug fix: the previous implementation called np.delete() without using its
    return value -- np.delete never modifies its argument in place -- so
    neutral tweets were counted but never actually removed (and the index
    bookkeeping would have been wrong even if they had been).
    """
    print('removing neutral tweets')
    keep = targets != 2  # boolean mask: True for non-neutral entries
    count = int(np.count_nonzero(~keep))
    inputs = inputs[keep]
    targets = targets[keep]
    print('removed {0} neutral tweets'.format(count))
    return inputs, targets
def remove_stopwords(inputs, stopwords):
    """
    Parses the inputs and removes stopwords (modifies `inputs` in place).
    Returns:
        inputs: A numpy array of the tweets

    Improvements: removed the unused `count` local, and the stopword list is
    converted to a set once so each token membership test is O(1) instead of
    a linear scan of the stopword array.
    """
    print('removing stopwords from tweets')
    stopword_set = set(stopwords)
    for i in range(len(inputs)):
        tokens = inputs[i].split()
        inputs[i] = ' '.join(t for t in tokens if t not in stopword_set)
    print('removed stopwords from tweets')
    return inputs
def remove_empty_tweets(inputs, targets):
    """
    Removes data where the tweet content is empty ('' or a single space).
    Returns:
        inputs: A numpy array of the tweets
        targets: A numpy array of the sentiment, 1 for positive, 0 for negative

    Bug fixes: `count` was never incremented, so the summary always reported
    0 removals; and the element-by-element np.delete loop was O(n^2). Both
    are replaced by a single boolean-mask pass.
    """
    print('removing empty tweets')
    keep = np.array([t != ' ' and t != '' for t in inputs], dtype=bool)
    count = int(np.count_nonzero(~keep))
    inputs = inputs[keep]
    targets = targets[keep]
    print('removed {0} tweets from dataset since tweets were empty'.format(count))
    return inputs, targets
def remove_punctuation(inputs):
    """
    Removes all ASCII punctuation from tweet content (modifies `inputs` in
    place).
    Returns:
        inputs: A numpy array of the tweets

    Bug fix: the previous implementation used string.maketrans("", "") with
    the two-argument str.translate(table, deletechars) form, which only
    exists on Python 2 and raises on Python 3. This character-filter version
    behaves identically on both.
    """
    print('removing punctuation from tweet content')
    punctuation = set(string.punctuation)
    for i in range(len(inputs)):
        inputs[i] = ''.join(ch for ch in inputs[i] if ch not in punctuation)
    print('removed punctuation from tweet content')
    return inputs
def main():
    """
    CLI Arguments allowed:
        keep_stopwords              Keep stopwords in tweet content
                                    By default removes stopwords
        keep_neutral_tweets         Keeps tweets with neutral sentiment
                                    By default removes neutral tweets
        keep_punctuation            Keeps punctuation in tweet content
                                    By default removes punctuation from tweet content
    """
    cli_args = sys.argv
    corpus = load_csv()
    inputs, targets = parse_corpus(corpus)
    # Keep an untouched copy of the tweets for later inspection.
    raw_inputs = inputs
    # Binarize sentiment: anything positive-valued becomes 1, else 0.
    targets = (targets > 0) * 1 # Changes target array to 0s and 1s
    if not 'keep_neutral_tweets' in cli_args:
        inputs, targets = remove_neutral_tweets(inputs, targets)
    if not 'keep_stopwords' in cli_args:
        load_stopwords()
        inputs = remove_stopwords(inputs, STOP_WORDS)
    if not 'keep_punctuation' in cli_args:
        inputs = remove_punctuation(inputs)
    # Cleaning may have emptied some tweets; drop them last.
    inputs, targets = remove_empty_tweets(inputs, targets)
    # Train set size = 80 %
    # Test set size = 10 %
    # Validation set size = 10 %
    print('splitting data in to train, test, and validation sets')
    inputs_train, inputs_test, targets_train, targets_test = train_test_split(
        inputs,
        targets,
        test_size=0.2,
        random_state=RANDOM_SEED
    )
    # Split the held-out 20% in half: 10% test, 10% validation.
    inputs_test, inputs_valid, targets_test, targets_valid = train_test_split(
        inputs_test,
        targets_test,
        test_size=0.5,
        random_state=RANDOM_SEED
    )
    print('split data in to train, test, and validation sets')
    assert len(inputs_train) == len(targets_train)
    assert len(inputs_valid) == len(targets_valid)
    assert len(inputs_test) == len(targets_test)
    print('Length of train set: {0}'.format(len(inputs_train)))
    print('Length of validation set: {0}'.format(len(inputs_valid)))
    print('Length of test set: {0}'.format(len(inputs_test)))
    total_data_length = len(inputs_train) + len(inputs_valid) + len(inputs_test)
    original_data_length = len(raw_inputs)
    print('Original dataset length: {0}, parsed dataset length: {1}'.format(
        original_data_length,
        total_data_length
    ))
    # NOTE(review): assumes the parsed_data/ directory already exists --
    # np.save does not create parent directories.
    print('saving parsed dataset')
    np.save('parsed_data/inputs_train.npy', inputs_train)
    np.save('parsed_data/targets_train.npy', targets_train)
    np.save('parsed_data/inputs_valid.npy', inputs_valid)
    np.save('parsed_data/targets_valid.npy', targets_valid)
    np.save('parsed_data/inputs_test.npy', inputs_test)
    np.save('parsed_data/targets_test.npy', targets_test)
    np.save('parsed_data/raw_inputs.npy', raw_inputs)
    print('saved parsed dataset')
    print('parsing complete!')


if __name__ == "__main__": main()
| gpl-3.0 |
nlholdem/icodoom | .venv/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
  """Convert dask/pandas containers into feeder-compatible arrays."""
  if HAS_DASK:
    x = extract_dask_data(x)
    y = extract_dask_labels(y) if y is not None else y
  if HAS_PANDAS:
    x = extract_pandas_data(x)
    y = extract_pandas_labels(y) if y is not None else y
  return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
                            y,
                            n_classes,
                            batch_size=None,
                            shuffle=True,
                            epochs=None):
  """Create data feeder, to sample inputs from dataset.

  If `x` and `y` are iterators, use `StreamingDataFeeder`.

  Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports
      iterables.
    n_classes: number of classes. Must be None or same type as y. In case, `y`
      is `dict`
      (or iterable which returns dict) such that `n_classes[key] = n_classes for
      y[key]`
    batch_size: size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    DataFeeder object that returns training data.

  Raises:
    ValueError: if one of `x` and `y` is iterable and the other is not.
  """
  x, y = _data_type_filter(x, y)
  if HAS_DASK:
    # pylint: disable=g-import-not-at-top
    import dask.dataframe as dd
    # Use the Dask-aware feeder only when both x and y are Dask objects.
    if (isinstance(x, (dd.Series, dd.DataFrame)) and
        (y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
      data_feeder_cls = DaskDataFeeder
    else:
      data_feeder_cls = DataFeeder
  else:
    data_feeder_cls = DataFeeder

  # Iterators get the streaming feeder; both x and y must then be iterators.
  if _is_iterable(x):
    if y is not None and not _is_iterable(y):
      raise ValueError('Both x and y should be iterators for '
                       'streaming learning to work.')
    return StreamingDataFeeder(x, y, n_classes, batch_size)
  return data_feeder_cls(
      x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
  """Re-group an iterator of samples (or dicts of samples) into batches.

  Yields `np.matrix` batches of up to `batch_size` rows, or dicts of such
  matrices when the iterator yields dicts.
  """
  if (batch_size is not None) and (batch_size <= 0):
    raise ValueError('Invalid batch_size %d.' % batch_size)

  # Peek at the first element to learn the structure (dict vs. plain),
  # then chain it back so nothing is lost.
  x_first_el = six.next(x)
  x = itertools.chain([x_first_el], x)

  chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
      x_first_el, dict) else []
  chunk_filled = False
  for data in x:
    if isinstance(data, dict):
      for k, v in list(data.items()):
        chunk[k].append(v)
        if (batch_size is not None) and (len(chunk[k]) >= batch_size):
          chunk[k] = np.matrix(chunk[k])
          chunk_filled = True
      if chunk_filled:
        yield chunk
        chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
            x_first_el, dict) else []
        chunk_filled = False
    else:
      chunk.append(data)
      if (batch_size is not None) and (len(chunk) >= batch_size):
        yield np.matrix(chunk)
        chunk = []

  # Flush the final partial batch.
  # NOTE(review): when the element count divides batch_size exactly this
  # still yields one trailing empty batch, and the dict flush below takes
  # its keys from `data` (the last element) rather than from `chunk` --
  # confirm both behaviors are intended by callers.
  if isinstance(x_first_el, dict):
    for k, v in list(data.items()):
      chunk[k] = np.matrix(chunk[k])
    yield chunk
  else:
    yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
  """Returns an iterable for feeding into predict step.

  Args:
    x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
      iterable.
    batch_size: Size of batches to split data into. If `None`, returns one
      batch of full size.

  Returns:
    List or iterator (or dictionary thereof) of parts of data to predict on.

  Raises:
    ValueError: if `batch_size` <= 0.
  """
  if HAS_DASK:
    x = extract_dask_data(x)
  if HAS_PANDAS:
    x = extract_pandas_data(x)
  # Iterators are batched lazily.
  if _is_iterable(x):
    return _batch_data(x, batch_size)
  # Promote 1-D feature vectors to a single column.
  if len(x.shape) == 1:
    x = np.reshape(x, (-1, 1))
  if batch_size is None:
    return [x]
  if batch_size <= 0:
    raise ValueError('Invalid batch_size %d.' % batch_size)
  n_batches = int(math.ceil(float(len(x)) / batch_size))
  return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
def setup_processor_data_feeder(x):
  """Sets up processor iterable.

  Args:
    x: numpy, pandas or iterable.

  Returns:
    Iterable of data to process.
  """
  return extract_pandas_matrix(x) if HAS_PANDAS else x
def check_array(array, dtype):
  """Coerce a list or ndarray to the expected dtype.

  Args:
    array: Input array.
    dtype: Expected dtype.

  Returns:
    The converted array, or the input unchanged when it is an instance of
    another class (e.g. h5py.Dataset) -- those are skipped to avoid copying
    and loading the whole dataset into memory.
  """
  if not isinstance(array, (np.ndarray, list)):
    return array
  return np.array(array, dtype=dtype, order=None, copy=False)
def _access(data, iloc):
  """Accesses an element from collection, using integer location based indexing.

  Args:
    data: array-like. The collection to access
    iloc: `int` or `list` of `int`s. Location(s) to access in `collection`

  Returns:
    The element of `a` found at location(s) `iloc`.
  """
  if HAS_PANDAS:
    import pandas as pd  # pylint: disable=g-import-not-at-top
    # pandas objects need positional (.iloc) indexing, not label indexing.
    if isinstance(data, (pd.Series, pd.DataFrame)):
      return data.iloc[iloc]
  return data[iloc]
def _check_dtype(dtype):
  """Warn about float64 inputs (poorly supported by many models); pass through."""
  if dtypes.as_dtype(dtype) == dtypes.float64:
    logging.warn('float64 is not supported by many models, '
                 'consider casting to float32.')
  return dtype
class DataFeeder(object):
  """Data feeder is an example class to sample data for TF trainer."""

  def __init__(self,
               x,
               y,
               n_classes,
               batch_size=None,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DataFeeder instance.

    Args:
      x: One feature sample which can either Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
      y: label vector, either floats for regression or class id for
        classification. If matrix, will consider as a sequence of labels.
        Can be `None` for unsupervised setting. Also supports dictionary of
        labels.
      n_classes: Number of classes, 0 and 1 are considered regression, `None`
        will pass through the input labels without one-hot conversion. Also, if
        `y` is `dict`, then `n_classes` must be `dict` such that
        `n_classes[key] = n_classes for label y[key]`, `None` otherwise.
      batch_size: Mini-batch size to accumulate samples in one mini batch.
      shuffle: Whether to shuffle `x`.
      random_state: Numpy `RandomState` object to reproduce sampling.
      epochs: Number of times to iterate over input data before raising
        `StopIteration` exception.

    Attributes:
      x: Input features (ndarray or dictionary of ndarrays).
      y: Input label (ndarray or dictionary of ndarrays).
      n_classes: Number of classes (if `None`, pass through indices without
        one-hot conversion).
      batch_size: Mini-batch size to accumulate.
      input_shape: Shape of the input (or dictionary of shapes).
      output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
    """
    x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
        y, dict)
    if isinstance(y, list):
      y = np.array(y)

    self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
                   ]) if x_is_dict else check_array(x, x.dtype)
    # NOTE(review): the dict branch below is guarded on x_is_dict, not
    # y_is_dict -- confirm y is always a dict whenever x is.
    self._y = None if y is None else \
        dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)

    # self.n_classes is not None means we're converting raw target indices
    # to one-hot.
    if n_classes is not None:
      if not y_is_dict:
        # Class indices must be integral; regression targets stay float32.
        y_dtype = (np.int64
                   if n_classes is not None and n_classes > 1 else np.float32)
        self._y = (None if y is None else check_array(y, dtype=y_dtype))

    self.n_classes = n_classes
    self.max_epochs = epochs

    x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
                   ]) if x_is_dict else self._x.shape
    y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
                   ]) if y_is_dict else None if y is None else self._y.shape

    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)

    # Input dtype matches dtype of x.
    self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
        else _check_dtype(self._x.dtype)

    # note: self._output_dtype = np.float32 when y is None
    self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
        else _check_dtype(self._y.dtype) if y is not None else np.float32

    # self.n_classes is None means we're passing in raw target indices;
    # keys that get one-hot conversion always feed float32.
    if n_classes is not None and y_is_dict:
      for key in list(n_classes.keys()):
        if key in self._output_dtype:
          self._output_dtype[key] = np.float32

    self._shuffle = shuffle
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state

    num_samples = list(self._x.values())[0].shape[
        0] if x_is_dict else self._x.shape[0]
    # Sampling order for the first epoch; re-permuted at each epoch end.
    if self._shuffle:
      self.indices = self.random_state.permutation(num_samples)
    else:
      self.indices = np.array(range(num_samples))
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None

  @property
  def x(self):
    # Input features (ndarray or dict of ndarrays).
    return self._x

  @property
  def y(self):
    # Labels (ndarray, dict of ndarrays, or None).
    return self._y

  @property
  def shuffle(self):
    return self._shuffle

  @property
  def input_dtype(self):
    return self._input_dtype

  @property
  def output_dtype(self):
    return self._output_dtype

  @property
  def batch_size(self):
    return self._batch_size

  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.

    Returns:
      The epoch placeholder.
    """
    self._epoch_placeholder = array_ops.placeholder(
        dtypes.int32, [1], name='epoch')
    return self._epoch_placeholder

  def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
    """

    def get_placeholder(shape, dtype, name_prepend):
      # Mirror the structure of `shape`: dict of placeholders or a single one.
      # The leading dimension is left as None to allow variable batch sizes.
      if shape is None:
        return None
      if isinstance(shape, dict):
        placeholder = {}
        for key in list(shape.keys()):
          placeholder[key] = array_ops.placeholder(
              dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
              name=name_prepend + '_' + key)
      else:
        placeholder = array_ops.placeholder(
            dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
      return placeholder

    self._input_placeholder = get_placeholder(self.input_shape,
                                              self._input_dtype, 'input')
    self._output_placeholder = get_placeholder(self.output_shape,
                                               self._output_dtype, 'output')
    return self._input_placeholder, self._output_placeholder

  def set_placeholders(self, input_placeholder, output_placeholder):
    """Sets placeholders for this data feeder.

    Args:
      input_placeholder: Placeholder for `x` variable. Should match shape
        of the examples in the x dataset.
      output_placeholder: Placeholder for `y` variable. Should match
        shape of the examples in the y dataset. Can be `None`.
    """
    self._input_placeholder = input_placeholder
    self._output_placeholder = output_placeholder

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {
        'epoch': self.epoch,
        'offset': self.offset,
        'batch_size': self._batch_size
    }

  def get_feed_dict_fn(self):
    """Returns a function that samples data into given placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from `x` and `y`.
    """
    x_is_dict, y_is_dict = isinstance(
        self._x, dict), self._y is not None and isinstance(self._y, dict)

    # Assign input features from random indices.
    def extract(data, indices):
      # 1-D feature arrays are reshaped into a single column.
      return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
              len(data.shape) == 1 else _access(data, indices))

    # assign labels from random indices
    def assign_label(data, shape, dtype, n_classes, indices):
      shape[0] = indices.shape[0]
      out = np.zeros(shape, dtype=dtype)
      for i in xrange(out.shape[0]):
        sample = indices[i]
        # self.n_classes is None means we're passing in raw target indices
        if n_classes is None:
          out[i] = _access(data, sample)
        else:
          if n_classes > 1:
            # One-hot: set the class position(s) to 1.0.
            if len(shape) == 2:
              out.itemset((i, int(_access(data, sample))), 1.0)
            else:
              for idx, value in enumerate(_access(data, sample)):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = _access(data, sample)
      return out

    def _feed_dict_fn():
      """Function that samples data into given placeholders."""
      if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
        raise StopIteration
      assert self._input_placeholder is not None
      feed_dict = {}
      if self._epoch_placeholder is not None:
        feed_dict[self._epoch_placeholder.name] = [self.epoch]

      # Take next batch of indices.
      x_len = list(self._x.values())[0].shape[
          0] if x_is_dict else self._x.shape[0]
      end = min(x_len, self.offset + self._batch_size)
      batch_indices = self.indices[self.offset:end]

      # adding input placeholder
      feed_dict.update(
          dict([(self._input_placeholder[k].name, extract(v, batch_indices))
                for k, v in list(self._x.items())]) if x_is_dict else
          {self._input_placeholder.name: extract(self._x, batch_indices)})

      # move offset and reset it if necessary; each pass over the data
      # (an epoch) ends with a fresh permutation when shuffling.
      self.offset += self._batch_size
      if self.offset >= x_len:
        self.indices = self.random_state.permutation(
            x_len) if self._shuffle else np.array(range(x_len))
        self.offset = 0
        self.epoch += 1

      # return early if there are no labels
      if self._output_placeholder is None:
        return feed_dict

      # adding output placeholders
      if y_is_dict:
        for k, v in list(self._y.items()):
          n_classes = (self.n_classes[k] if k in self.n_classes else
                       None) if self.n_classes is not None else None
          shape, dtype = self.output_shape[k], self._output_dtype[k]
          feed_dict.update({
              self._output_placeholder[k].name:
                  assign_label(v, shape, dtype, n_classes, batch_indices)
          })
      else:
        shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
        feed_dict.update({
            self._output_placeholder.name:
                assign_label(self._y, shape, dtype, n_classes, batch_indices)
        })

      return feed_dict

    return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
  """Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows to read data as it comes it from disk or
  somewhere else. It's custom to have this iterators rotate infinetly over
  the dataset, to allow control of how much to learn on the trainer side.
  """
  def __init__(self, x, y, n_classes, batch_size):
    """Initializes a StreamingDataFeeder instance.
    Args:
      x: iterator each element of which returns one feature sample. Sample can
        be a Nd numpy matrix or dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
        classes regression values.
      n_classes: indicator of how many classes the corresponding label sample
        has for the purposes of one-hot conversion of label. In case where `y`
        is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
        of how many classes there are in each label in `y`. If key is
        present in `y` and missing in `n_classes`, the value is assumed `None`
        and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        `None`, then assumes that iterator to return already batched element.
    Attributes:
      x: input features (or dictionary of input features).
      y: input label (or dictionary of output features).
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input (can be dictionary depending on `x`).
      output_shape: shape of the output (can be dictionary depending on `y`).
      input_dtype: dtype of input (can be dictionary depending on `x`).
      output_dtype: dtype of output (can be dictionary depending on `y`).
    """
    # pylint: disable=invalid-name,super-init-not-called
    # Peek at the first element to learn shapes/dtypes, then re-chain it so
    # the caller's iterator still yields every element exactly once.
    x_first_el = six.next(x)
    self._x = itertools.chain([x_first_el], x)
    if y is not None:
      y_first_el = six.next(y)
      self._y = itertools.chain([y_first_el], y)
    else:
      y_first_el = None
      self._y = None
    self.n_classes = n_classes
    x_is_dict = isinstance(x_first_el, dict)
    y_is_dict = y is not None and isinstance(y_first_el, dict)
    if y_is_dict and n_classes is not None:
      # When y is a dict, n_classes must be a dict with matching keys.
      assert isinstance(n_classes, dict)
    # extract shapes for first_elements
    if x_is_dict:
      x_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
    else:
      x_first_el_shape = [1] + list(x_first_el.shape)
    if y_is_dict:
      y_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
    elif y is None:
      y_first_el_shape = None
    else:
      # y may yield a list of arrays; use the first element's shape then.
      y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
          y_first_el, list) else y_first_el.shape))
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_first_el_shape, y_first_el_shape, n_classes, batch_size)
    # Input dtype of x_first_el.
    if x_is_dict:
      self._input_dtype = dict(
          [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
    else:
      self._input_dtype = _check_dtype(x_first_el.dtype)
    # Output dtype of y_first_el.
    def check_y_dtype(el):
      # Recursively resolve the dtype of a label element (array, list, scalar).
      if isinstance(el, np.ndarray):
        return el.dtype
      elif isinstance(el, list):
        return check_y_dtype(el[0])
      else:
        return _check_dtype(np.dtype(type(el)))
    # Output types are floats, due to both softmaxes and regression req.
    # NOTE(review): this branch assumes n_classes is an int when y is None;
    # a dict n_classes with y=None would make `n_classes > 0` ill-defined --
    # confirm callers never pass that combination.
    if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
      self._output_dtype = np.float32
    elif y_is_dict:
      self._output_dtype = dict(
          [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
    elif y is None:
      self._output_dtype = None
    else:
      self._output_dtype = check_y_dtype(y_first_el)
  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.
    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}
  def get_feed_dict_fn(self):
    """Returns a function, that will sample data and provide it to placeholders.
    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    self.stopped = False
    def _feed_dict_fn():
      """Samples data and provides it to placeholders.
      Returns:
        `dict` of input and output tensors.
      """
      def init_array(shape, dtype):
        """Initialize array of given shape or dict of shapes and dtype."""
        if shape is None:
          return None
        elif isinstance(shape, dict):
          return dict([(k, np.zeros(shape[k], dtype[k]))
                       for k in list(shape.keys())])
        else:
          return np.zeros(shape, dtype=dtype)
      def put_data_array(dest, index, source=None, n_classes=None):
        """Puts data array into container."""
        if source is None:
          # Iterator exhausted: truncate the batch to the filled prefix.
          dest = dest[:index]
        elif n_classes is not None and n_classes > 1:
          # One-hot encode label value(s) directly into the output array.
          if len(self.output_shape) == 2:
            dest.itemset((index, source), 1.0)
          else:
            for idx, value in enumerate(source):
              dest.itemset(tuple([index, idx, value]), 1.0)
        else:
          if len(dest.shape) > 1:
            dest[index, :] = source
          else:
            dest[index] = source[0] if isinstance(source, list) else source
        return dest
      def put_data_array_or_dict(holder, index, data=None, n_classes=None):
        """Puts data array or data dictionary into container."""
        if holder is None:
          return None
        if isinstance(holder, dict):
          if data is None:
            data = {k: None for k in holder.keys()}
          assert isinstance(data, dict)
          for k in holder.keys():
            num_classes = n_classes[k] if (n_classes is not None and
                                           k in n_classes) else None
            holder[k] = put_data_array(holder[k], index, data[k], num_classes)
        else:
          holder = put_data_array(holder, index, data, n_classes)
        return holder
      if self.stopped:
        raise StopIteration
      inp = init_array(self.input_shape, self._input_dtype)
      out = init_array(self.output_shape, self._output_dtype)
      for i in xrange(self._batch_size):
        # Add handling when queue ends.
        try:
          next_inp = six.next(self._x)
          inp = put_data_array_or_dict(inp, i, next_inp, None)
        except StopIteration:
          # Source exhausted mid-batch: emit the partial batch now; the next
          # call raises StopIteration via self.stopped.
          self.stopped = True
          if i == 0:
            raise
          inp = put_data_array_or_dict(inp, i, None, None)
          out = put_data_array_or_dict(out, i, None, None)
          break
        if self._y is not None:
          next_out = six.next(self._y)
          out = put_data_array_or_dict(out, i, next_out, self.n_classes)
      # creating feed_dict
      if isinstance(inp, dict):
        feed_dict = dict([(self._input_placeholder[k].name, inp[k])
                          for k in list(self._input_placeholder.keys())])
      else:
        feed_dict = {self._input_placeholder.name: inp}
      if self._y is not None:
        if isinstance(out, dict):
          feed_dict.update(
              dict([(self._output_placeholder[k].name, out[k])
                    for k in list(self._output_placeholder.keys())]))
        else:
          feed_dict.update({self._output_placeholder.name: out})
      return feed_dict
    return _feed_dict_fn
class DaskDataFeeder(object):
  """Data feeder for that reads data from dask.Series and dask.DataFrame.
  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder will remove requirement to have full dataset in the
  memory and still do random seeks for sampling of batches.
  """
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DaskDataFeeder instance.
    Args:
      x: iterator that returns for each element, returns features.
      y: iterator that returns for each element, returns 1 or many classes /
        regression values.
      n_classes: indicator of how many classes the label has.
      batch_size: Mini batch size to accumulate.
      shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate so use a
        int value for this if you want consistent sized batches.
      epochs: Number of epochs to run.
    Attributes:
      x: input features.
      y: input label.
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input.
      output_shape: shape of the output.
      input_dtype: dtype of input.
      output_dtype: dtype of output.
    Raises:
      ValueError: if `x` or `y` are `dict`, as they are not supported currently.
    """
    if isinstance(x, dict) or isinstance(y, dict):
      raise ValueError(
          'DaskDataFeeder does not support dictionaries at the moment.')
    # pylint: disable=invalid-name,super-init-not-called
    import dask.dataframe as dd # pylint: disable=g-import-not-at-top
    # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
    self._x = x
    self._y = y
    # save column names
    self._x_columns = list(x.columns)
    if isinstance(y.columns[0], str):
      self._y_columns = list(y.columns)
    else:
      # deal with cases where two DFs have overlapped default numeric colnames
      # (use an integer label past x's columns so the rename cannot collide).
      self._y_columns = len(self._x_columns) + 1
      self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
    # TODO(terrytangyuan): deal with unsupervised cases
    # combine into a data frame
    self.df = dd.multi.concat([self._x, self._y], axis=1)
    self.n_classes = n_classes
    x_count = x.count().compute()[0]
    x_shape = (x_count, len(self._x.columns))
    y_shape = (x_count, len(self._y.columns))
    # TODO(terrytangyuan): Add support for shuffle and epochs.
    self._shuffle = shuffle
    self.epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Fraction of the dataset one random_split sample should cover so a
    # sample approximates a mini-batch of self._batch_size rows.
    self.sample_fraction = self._batch_size / float(x_count)
    self._input_dtype = _check_dtype(self._x.dtypes[0])
    self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
    if random_state is None:
      self.random_state = 66
    else:
      self.random_state = random_state
  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.
    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}
  def get_feed_dict_fn(self, input_placeholder, output_placeholder):
    """Returns a function, that will sample data and provide it to placeholders.
    Args:
      input_placeholder: tf.Placeholder for input features mini batch.
      output_placeholder: tf.Placeholder for output labels.
    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    def _feed_dict_fn():
      """Samples data and provides it to placeholders."""
      # TODO(ipolosukhin): option for with/without replacement (dev version of
      # dask)
      # NOTE(review): self.random_state is passed unchanged on every call, so
      # successive invocations may draw the same split -- confirm intended.
      sample = self.df.random_split(
          [self.sample_fraction, 1 - self.sample_fraction],
          random_state=self.random_state)
      inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
      out = extract_pandas_matrix(sample[0][self._y_columns].compute())
      # convert to correct dtype
      inp = np.array(inp, dtype=self._input_dtype)
      # one-hot encode out for each class for cross entropy loss
      if HAS_PANDAS:
        import pandas as pd # pylint: disable=g-import-not-at-top
        if not isinstance(out, pd.Series):
          out = out.flatten()
      # Width of the one-hot encoding comes from the max label value in y.
      out_max = self._y.max().compute().values[0]
      encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
      encoded_out[np.arange(out.size), out] = 1
      return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
    return _feed_dict_fn
| gpl-3.0 |
ChangLab/FAST-iCLIP | bin/oldscripts/fastclip.py | 2 | 65904 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os
import cmath
import math
import sys
import numpy as np
import glob
import subprocess
import re
from matplotlib_venn import venn2
import pandas as pd
from collections import defaultdict
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import shutil
from optparse import OptionParser
mpl.rcParams['savefig.dpi'] = 2 * mpl.rcParams['savefig.dpi']
# <codecell>
global sampleName
global outfilepath
global logFile
global logOpen
### File name ###
# Sample name is the first CLI argument; raw reads are expected in ./rawdata
# and all outputs are written under ./results/<sampleName>/.
sampleName=sys.argv[1]
infilepath=os.getcwd() + '/' + 'rawdata/'
outfilepath=os.getcwd() + '/results/%s/'%sampleName
# <codecell>
# Create log and start pipeline
# NOTE(review): assumes ./results/<sampleName>/ already exists -- confirm.
logFile=outfilepath + "runLog"
logOpen=open(logFile, 'w')
# <codecell>
### Parameters ###
# Read processing / filtering settings.
iCLIP3pBarcode='AGATCGGAAGAGCGGTTCAGCAGGAATGCCGAGACCGATCTCGTATGCCGTCTTCTGCTTG' # Barcode sequence to trim from reads.
q=25 # Minimum quality score to keep during filtering.
p=80 # Percentage of bases that must have quality > q during filtering.
iCLIP5pBasesToTrim=13 # Number of reads to trim from 5' end of clip reads.
k='1' # k=N distinct, valid alignments for each read in bt2 mapping.
threshold=3 # Sum of RT stops (for both replicates) required to keep file.
expand=15 # Bases to expand around RT position after RT stops are merged.
# Repeat-RNA index and annotation.
repeat_index=os.getcwd() + '/docs/repeat/rep' # bt2 index for repeat RNA.
repeatGenomeBuild=os.getcwd()+'/docs/repeat/repeatRNA.fa' # Sequence of repeat index.
repeatAnnotation=os.getcwd()+'/docs/repeat/Hs_repeatIndex_positions.txt' # Repeat annotation file.
# rRNA sub-region coordinates (presumably positions of 18S/5S/28S within
# repeatRNA.fa -- TODO confirm against the repeat annotation).
start18s=3657
end18s=5527
start5s=6623
end5s=6779
start28s=7935
end28s=12969
rRNAend=13314
threshold_rep=1 # RT stop threshold for repeat index.
# Genome index, maskers, and annotation files.
index=os.getcwd() + '/docs/hg19/hg19' # bt2 index for mapping.
index_tag='hg19' # Name of bt2 index.
genomeFile=os.getcwd()+'/docs/human.hg19.genome' # Genome file for bedGraph, etc.
genomeForCLIPper='-shg19' # Parameter for CLIPper.
blacklistregions=os.getcwd()+'/docs/wgEncodeDukeMapabilityRegionsExcludable.bed' # Blacklist masker.
repeatregions=os.getcwd()+'/docs/repeat_masker.bed' # Repeat masker.
geneAnnot=glob.glob(os.getcwd()+'/docs/genes_types/*') # List of genes by type.
snoRNAmasker=os.getcwd()+'/docs/snoRNA_reference/snoRNAmasker_formatted_5pExtend.bed' # snoRNA masker file.
miRNAmasker=os.getcwd()+'/docs/miR_sort_clean.bed' # miRNA masker file.
fivePUTRBed=os.getcwd()+'/docs/5pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
threePUTRBed=os.getcwd()+'/docs/3pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
cdsBed=os.getcwd()+'/docs/Exons_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
utrFile=os.getcwd()+'/docs/hg19_ensembl_UTR_annotation.txt' # UTR annotation file.
genesFile=os.getcwd()+'/docs/hg19_ensembl_genes.txt' # Gene annotation file.
sizesFile=os.getcwd()+'/docs/hg19.sizes' # Genome sizes file.
snoRNAindex=os.getcwd()+'/docs/snoRNA_reference/sno_coordinates_hg19_formatted.bed' # snoRNA coordinate file.
CLIPPERoutNameDelim='_' # Delimiter that for splitting gene name in the CLIPper windows file.
# <codecell>
# Record a timestamp and all run parameters at the top of the log.
import datetime
now=datetime.datetime.now()
logOpen.write("Timestamp:%s\n"%str(now))
logOpen.write("\n###Parameters used###\n")
# Fixed: the next two messages previously ended in "\n'" -- a stray trailing
# apostrophe (misplaced from "3'"/"5'") that corrupted the run log.
logOpen.write("3' barcode:%s\n"%iCLIP3pBarcode)
logOpen.write("Minimum quality score (q):%s\n"%q)
logOpen.write("Percentage of bases with > q:%s\n"%p)
logOpen.write("5' bases to trim:%s\n"%iCLIP5pBasesToTrim)
logOpen.write("k distinct, valid alignments for each read in bt2 mapping:%s\n"%k)
logOpen.write("Threshold for minimum number of RT stops:%s\n"%threshold)
logOpen.write("Bases for expansion around conserved RT stops:%s\n"%expand)
logOpen.write("\n\n\n")
# <codecell>
# Echo the sample being processed and record it in the run log.
print "Processing sample %s" %(sampleName)
logOpen.write("Processing sample: "+sampleName+'\n')
# Paired raw fastq files are expected as <sample>_R1.fastq / <sample>_R2.fastq.
read1=infilepath+sampleName+'_R1.fastq'
read2=infilepath+sampleName+'_R2.fastq'
unzippedreads=[read1,read2]
# <codecell>
def trimReads3p(unzippedreads,adapter3p):
    # Usage: Trims a specified adapter sequence from the 3p end of the reads.
    # Input: List of fastq files.
    # Output: List of 3p trimmed files.
    # Uses fastx_clipper (external tool); -n keeps reads containing Ns,
    # -l33 drops reads shorter than 33 nt, -Q33 selects Sanger quality scores.
    trimparam='-a'+adapter3p # Adapter string
    trimmedReads=[]
    try:
        for inread in unzippedreads:
            # Mirror the rawdata/ path into the per-sample results/ directory.
            outread=inread.replace("rawdata/", "results/%s/"%sampleName)
            outread=outread.replace(".fastq", "_3ptrimmed.fastq")
            process=subprocess.Popen(['fastx_clipper',trimparam,'-n','-l33','-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr = process.communicate()
            logOpen.write("Trim 3p end of reads.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            trimmedReads=trimmedReads+[outread]
        return trimmedReads
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        logOpen.write("Problem with 3p trimming.\n")
        print "Problem with 3p trimming."
# Step 1: remove the 3' sequencing adapter from both raw fastq files.
print "Trim 3p adapter from reads."
trimmedReads3p=trimReads3p(unzippedreads,iCLIP3pBarcode)
# <codecell>
def qualityFilter(trim3pReads,q,p):
    # Usage: Filters reads based upon quality score.
    # Input: List of fastq file names as well as the quality paramters p and q.
    # Output: List of modified fastq file names.
    # Keeps only reads where at least p% of bases have quality score > q
    # (fastq_quality_filter, external tool; -Q33 = Sanger encoding).
    qualityparam='-q'+str(q)
    percentrageparam='-p'+str(p)
    filteredReads=[]
    try:
        for inread in trim3pReads:
            outread=inread.replace(".fastq", "_filter.fastq")
            process=subprocess.Popen(['fastq_quality_filter',qualityparam,percentrageparam,'-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr=process.communicate()
            logOpen.write("Perform quality filtering.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            filteredReads=filteredReads+[outread]
        return filteredReads
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        logOpen.write("Problem with quality filter.\n")
        print "Problem with quality filter."
# Step 2: quality-filter the 3'-trimmed reads.
print "Perform quality filtering."
filteredReads=qualityFilter(trimmedReads3p,q,p)
# <codecell>
def dupRemoval(filteredReads):
    # Usage: Removes duplicate reads.
    # Input: List of fastq file names.
    # Output: List of reads in FASTA format.
    # fastx_collapser emits FASTA; the helper perl script converts it back to
    # fastq so downstream tools keep working on fastq input.
    program=os.getcwd() + '/bin/fasta_to_fastq.pl'
    noDupes=[]
    try:
        for inread in filteredReads:
            outread=inread.replace(".fastq","_nodupe.fasta")
            process=subprocess.Popen(['fastx_collapser','-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr=process.communicate()
            logOpen.write("Perform duplicate removal.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            fastqOut=outread.replace('.fasta', '.fastq') # fastx_collapser returns fasta files, which are then converted to fastq.
            outfh=open(fastqOut, 'w')
            process=subprocess.Popen(['perl',program,outread],stdout=outfh)
            process.communicate() # Wait for the process to complete.
            os.remove(outread) # Remove the remaining .fasta file.
            noDupes=noDupes+[fastqOut]
        return noDupes
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        logOpen.write("Problem with duplicate removal.\n")
        print "Problem with duplicate removal."
# Step 3: collapse exact duplicate reads (PCR duplicates).
print "Perform duplicate removal."
nodupReads=dupRemoval(filteredReads)
# <codecell>
def trimReads5p(nodupes,n):
    # Usage: Trims a specified number of bases from the 5' end of each read.
    # Input: List of fastq files.
    # Output: List of 5p trimmed files.
    # NOTE(review): fastx_trimmer's -fN keeps bases from position N (1-based),
    # i.e. it removes N-1 bases -- confirm the intended trim length.
    trimparam='-f'+str(n)
    trimmedReads=[]
    try:
        for inread in nodupes:
            outread=inread.replace(".fastq", "_5ptrimmed.fastq")
            process=subprocess.Popen(['fastx_trimmer', trimparam, '-Q33', '-i', inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr=process.communicate()
            logOpen.write("Perform 5' barcode trimming.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            trimmedReads=trimmedReads+[outread]
        return trimmedReads
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        logOpen.write("Problem with 5' barcode trimming.\n")
        print "Problem with 5' barcode trimming."
# Step 4: trim the 5' barcode from each read.
print "Perform 5' barcode trimming."
trimmedReads5p=trimReads5p(nodupReads,iCLIP5pBasesToTrim)
# <codecell>
def runBowtie(fastqFiles,index,index_tag):
    # Usage: Read mapping to reference.
    # Input: Fastq files of replicate trimmed read files.
    # Output: Tuple of (mapped sam paths, unmapped fastq paths).
    # Relies on the module-level `k` (number of alignments bowtie2 reports).
    program='bowtie2'
    mappedReads=[]
    unMappedReads=[]
    try:
        for infastq in fastqFiles:
            outfile=infastq.replace(".fastq","_mappedTo%s.sam"%index_tag)
            unmapped=infastq.replace(".fastq","_notMappedTo%s.fastq"%index_tag)
            # --un captures reads that fail to align so they can be re-mapped
            # against the next index (repeat index first, then the genome).
            process=subprocess.Popen([program,'-x',index,'-k',k,'-U',infastq,'--un',unmapped,'-S',outfile],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout,stderr=process.communicate()
            logOpen.write("Perform mapping to %s index.\n"%index_tag)
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            mappedReads = mappedReads + [outfile]
            unMappedReads = unMappedReads + [unmapped]
        return (mappedReads,unMappedReads)
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        logOpen.write("Problem with mapping.\n")
        print "Problem with mapping."
# Step 5: map against the repeat RNA index first; unmapped reads go on to hg19.
print "Run mapping to repeat index."
mappedReads_rep,unmappedReads_rep=runBowtie(trimmedReads5p,repeat_index,'repeat')
# <codecell>
def runSamtools(samfiles):
    # Usage: Samfile processing.
    # Input: Sam files from Bowtie mapping.
    # Output: Sorted bedFiles.
    # Pipeline per file: sam -> bam (samtools view) -> sorted bam (samtools
    # sort) -> bed (bamToBed).
    program = 'samtools'
    program2 = 'bamToBed'
    outBedFiles=[]
    try:
        for samfile in samfiles:
            bamfile = samfile.replace('.sam','.bam')
            proc = subprocess.Popen( [program,'view','-bS','-o', bamfile, samfile])
            proc.communicate()
            # Old samtools sort syntax: second argument is an output *prefix*,
            # producing <prefix>.bam.
            bamfile_sort = bamfile.replace('.bam','_sorted')
            proc2 = subprocess.Popen([program,'sort',bamfile, bamfile_sort])
            proc2.communicate()
            bedFile = bamfile_sort.replace('_sorted', '_withDupes.bed')
            outfh = open(bedFile,'w')
            proc3 = subprocess.Popen( [program2,'-i', bamfile_sort+'.bam'],stdout=outfh)
            proc3.communicate()
            outBedFiles=outBedFiles+[bedFile]
        return outBedFiles
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        logOpen.write("Problem with samtools.\n")
        print "Problem with samtools."
# Convert repeat-index sam files to sorted bed files.
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles_rep=runSamtools(mappedReads_rep)
# <codecell>
def seperateStrands(mappedReads):
    # Usage: Seperate positive and negative strands.
    # Input: Paths to bed files from Samtools.
    # Output: Tuple of (negative-strand paths, positive-strand paths).
    # Fix: output files are now opened with `with` so they are flushed and
    # closed before downstream steps read them (previously left open).
    negativeStrand=[]
    positiveStrand=[]
    for mapFile in mappedReads:
        neg_strand=mapFile.replace('.bed','_neg.bed')
        pos_strand=mapFile.replace('.bed','_pos.bed')
        negativeStrand=negativeStrand+[neg_strand]
        positiveStrand=positiveStrand+[pos_strand]
        with open(mapFile, 'r') as infile:
            with open(neg_strand, 'w') as neg:
                with open(pos_strand, 'w') as pos:
                    for line in infile:
                        # Column 6 of a BED record holds the strand.
                        strandField=str(line.strip().split('\t')[5])
                        if strandField == '-':
                            neg.write(line)
                        elif strandField == '+':
                            pos.write(line)
    return (negativeStrand,positiveStrand)
def modifyNegativeStrand(negativeStrandReads):
    # Usage: For negative stranded reads, ensure 5' position (RT stop) is listed first.
    # Input: Bed file paths to all negative stranded reads.
    # Output: Paths to modified bed files.
    # Fix: the output file is now opened with `with` so it is flushed and
    # closed before downstream steps read it (previously left open).
    negativeStrandEdit=[]
    for negativeRead in negativeStrandReads:
        neg_strand_edited=negativeRead.replace('_neg.bed','_negEdit.bed')
        negativeStrandEdit=negativeStrandEdit+[neg_strand_edited]
        with open(negativeRead, 'r') as infile:
            with open(neg_strand_edited, 'w') as neg_edit:
                for line in infile:
                    chrom,start,end,name,quality,strand=line.strip().split('\t')
                    # For '-' strand reads the RT stop is the BED end; emit a
                    # 30-nt window beginning at that coordinate.
                    neg_edit.write('\t'.join((chrom,end,str(int(end)+30),name,quality,strand))+'\n')
    return negativeStrandEdit
def isolate5prime(strandedReads):
    # Usage: Isolate only the Chr, 5' position (RT stop), and strand.
    # Input: Bed file paths to strand seperated reads.
    # Output: Paths to RT stop files.
    # Fix: the output file is now opened with `with` so it is flushed and
    # closed before downstream steps read it (previously left open).
    RTstops=[]
    for reads in strandedReads:
        RTstop=reads.replace('.bed','_RTstop.bed')
        RTstops=RTstops+[RTstop]
        with open(reads, 'r') as infile:
            with open(RTstop, 'w') as f:
                for line in infile:
                    chrom,start,end,name,quality,strand=line.strip().split('\t')
                    f.write('\t'.join((chrom,start,strand))+'\n')
    return RTstops
# Isolate RT stops (5' read positions) for reads mapped to the repeat index.
print "RT stop isolation (repeat)."
logOpen.write("RT stop isolation (repeat).\n")
readsByStrand_rep=seperateStrands(mappedBedFiles_rep)
# Negative-strand reads are flipped so the 5' position (RT stop) comes first.
negativeRTstop_rep=isolate5prime(modifyNegativeStrand(readsByStrand_rep[0]))
positiveRTstop_rep=isolate5prime(readsByStrand_rep[1])
# <codecell>
def fileCat(destinationFile,fileList):
    # Concatenate the contents of every file in fileList, in order, into
    # destinationFile (created or truncated).
    with open(destinationFile, "w") as dest:
        for srcPath in fileList:
            with open(srcPath, "r") as src:
                dest.write(src.read())
def RTcounts(RTfile):
    # Tally RT stops: read a headerless 3-column (Chr, Start, Strand) table
    # and return a Series of counts indexed by (Chr, Start).
    stops = pd.DataFrame(pd.read_table(RTfile, index_col=None, header=None, sep='\t'))
    stops.columns = ['Chr', 'Start', 'Strand']
    return stops.groupby(['Chr', 'Start']).size()
def mergeRT(RTstopFiles,outfilename,threshold,expand,strand):
    # Usage: Merge RT stops between replicates and keep only those positions that exceed threshold.
    # Input: Files with RT stops for each replicate, outfile, threshold, strand, and bases to expand around RT stop.
    # Output: None. Writes merged RT stop file.
    # Fix: the output file is now opened with `with` so it is flushed and
    # closed before fileCat() reads it back (previously left open).
    cts_R1=RTcounts(RTstopFiles[0])
    cts_R2=RTcounts(RTstopFiles[1])
    # Inner join keeps only positions observed in BOTH replicates.
    m=pd.concat([cts_R1,cts_R2],axis=1,join='inner')
    m.columns=['Rep_1','Rep_2']
    m['Sum']=m['Rep_1']+m['Rep_2']
    m_filter=m[m['Sum']>threshold]
    with open(outfilename, 'w') as f:
        for i in m_filter.index:
            chrom=i[0]
            RT=i[1]
            count=m_filter.loc[i,'Sum']
            # Expand around the RT stop; clamp at the start of the sequence
            # so coordinates never go negative.
            if RT > expand:
                read='\t'.join((chrom,str(int(RT)-expand),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
            else:
                read='\t'.join((chrom,str(int(RT)),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
            # Write the interval once per supporting read so downstream
            # coverage reflects the RT-stop counts.
            f.write(read*(count))
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
# Merge replicate RT stops per strand for the repeat index, then concatenate
# both strands into a single bed file.
posMerged=outfilepath+sampleName+'_repeat_positivereads.mergedRT'
strand='+'
mergeRT(positiveRTstop_rep,posMerged,threshold_rep,expand,strand)
negMerged=outfilepath+sampleName+'_repeat_negativereads.mergedRT'
strand='-'
mergeRT(negativeRTstop_rep,negMerged,threshold_rep,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold_rep+'_repeat_allreads.mergedRT.bed'
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
# Map reads that did not align to the repeat index against the genome index.
print "Run mapping to %s."%index_tag
mappedReads,unmappedReads=runBowtie(unmappedReads_rep,index,index_tag)
# <codecell>
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles=runSamtools(mappedReads)
# <codecell>
def runRepeatMask(mappedReads,repeatregions):
    # Usage: Remove repeat regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools) and blastlist regions removed.
    # Output: Bedfile with repeat regions removed.
    # intersectBed -v keeps reads with NO overlap; -s enforces same strand.
    program='intersectBed'
    masked=[]
    try:
        for bedIn in mappedReads:
            noRepeat=bedIn.replace('.bed','_noRepeat.bed')
            outfh=open(noRepeat, 'w')
            proc=subprocess.Popen([program,'-a',bedIn,'-b',repeatregions,'-v','-s'],stdout=outfh)
            proc.communicate()
            outfh.close()
            masked=masked+[noRepeat]
        return (masked)
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        print "Problem with repeat masking."
        logOpen.write("Problem with repeat masking.\n")
def runBlacklistRegions(mappedReads,blacklistregions):
    # Usage: Remove blacklisted regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools).
    # Output: Bedfile with blacklisted regions removed.
    # intersectBed -v keeps reads with NO overlap (strand-agnostic: no -s).
    program='intersectBed'
    blackListed=[]
    try:
        for bedIn in mappedReads:
            noBlacklist=bedIn.replace('.bed','_noBlacklist.bed')
            outfh=open(noBlacklist, 'w')
            proc=subprocess.Popen([program,'-a',bedIn,'-b',blacklistregions,'-v'],stdout=outfh)
            proc.communicate()
            outfh.close()
            blackListed=blackListed+[noBlacklist]
        return (blackListed)
    except:
        # Best-effort: the failure is logged and None is returned implicitly.
        print "Problem with blacklist."
        logOpen.write("Problem with blacklist.\n")
print "Run repeat and blacklist region masker."
logOpen.write("Run repeat and blacklist masker.\n")
# Blacklist filtering first, then strand-aware repeat masking.
blacklistedBedFiles=runBlacklistRegions(mappedBedFiles,blacklistregions)
maskedBedFiles=runRepeatMask(blacklistedBedFiles,repeatregions)
# <codecell>
print "RT stop isolation."
logOpen.write("RT stop isolation.\n")
# Same RT-stop workflow as for the repeat index, now on masked genome reads.
readsByStrand=seperateStrands(maskedBedFiles)
negativeRTstop=isolate5prime(modifyNegativeStrand(readsByStrand[0]))
positiveRTstop=isolate5prime(readsByStrand[1])
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_%s_positivereads.mergedRT'%index_tag
strand='+'
mergeRT(positiveRTstop,posMerged,threshold,expand,strand)
negMerged=outfilepath+sampleName+'_%s_negativereads.mergedRT'%index_tag
strand='-'
mergeRT(negativeRTstop,negMerged,threshold,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold+'_%s_allreads.mergedRT.bed'%index_tag
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
def runCLIPPER(RTclusterfile,genome,genomeFile):
    # Useage: Process the mergedRT file and pass through CLIPper FDR script.
    # Input: Merged RT file.
    # Output: CLIPper input (.bed) file and output file.
    # Pipeline: bed -> bam -> sorted+indexed bam (with flagstat) -> bed -> clipper.
    program='bedToBam'
    program2='samtools'
    program3='bamToBed'
    program4='clipper'
    bamfile=RTclusterfile.replace('.bed','.bam')
    outfh=open(bamfile, 'w')
    proc=subprocess.Popen([program,'-i',RTclusterfile,'-g',genomeFile],stdout=outfh)
    proc.communicate()
    # Old samtools sort syntax: second argument is an output prefix.
    bamfile_sort=bamfile.replace('.bam','.srt')
    proc2=subprocess.Popen([program2,'sort',bamfile,bamfile_sort])
    proc2.communicate()
    bamfile_sorted=bamfile_sort+'.bam'
    mapStats=bamfile_sorted.replace('.srt.bam','.mapStats.txt')
    outfh=open(mapStats, 'w')
    proc3=subprocess.Popen([program2,'flagstat',bamfile_sorted],stdout=outfh)
    proc3.communicate()
    proc4=subprocess.Popen([program2,'index',bamfile_sorted])
    proc4.communicate()
    CLIPPERin=bamfile_sorted.replace('.srt.bam','_CLIPPERin.bed')
    outfh=open(CLIPPERin, 'w')
    proc5=subprocess.Popen([program3,'-i',bamfile_sorted],stdout=outfh)
    proc5.communicate()
    CLIPPERout=CLIPPERin.replace('_CLIPPERin.bed','_CLIP_clusters')
    proc6=subprocess.Popen([program4,'--bam',bamfile_sorted,genome,'--outfile=%s'%CLIPPERout],)
    proc6.communicate()
    # NOTE(review): `outfh` is rebound three times; only the last handle is
    # closed here -- earlier ones are left to garbage collection.
    outfh.close()
    return (CLIPPERin,CLIPPERout)
def makeGeneNameDict(fi):
    # Usage: Build a dictionary mapping each RT stop to a gene name.
    # Input: File path to intersected CLIPper windows and input RT stop coordinates.
    # Output: Dict keyed by "chrom_start_end_strand" -> gene name.
    # Relies on the module-level CLIPPERoutNameDelim to parse gene names.
    nameDict = {}
    with open(fi, 'r') as infile:
        for line in infile:
            fields = line.strip().split('\t')
            RT_id = '_'.join((fields[0], fields[1], fields[2], fields[5]))
            if RT_id not in nameDict:
                # First window wins when an RT stop hits multiple windows.
                nameDict[RT_id] = fields[9].strip().split(CLIPPERoutNameDelim)[0]
    return nameDict
def modCLIPPERout(CLIPPERin,CLIPPERout):
    # Usage: Process the CLIPper output and isolate lowFDR reads based upon CLIPper windows.
    # Input: .bed file passed into CLIPper and the CLIPper windows file.
    # Output: Low FDR reads recovered using the CLIPer windows file, genes per cluster, gene list of CLIPper clusters, and CLIPper windows as .bed.
    program='intersectBed'
    CLIPperOutBed=CLIPPERout+'.bed'
    CLIPpeReadsPerCluster=CLIPPERout+'.readsPerCluster'
    CLIPpeGeneList=CLIPPERout+'.geneNames'
    f = open(CLIPperOutBed,'w')
    g = open(CLIPpeReadsPerCluster,'w')
    h = open(CLIPpeGeneList,'w')
    with open(CLIPPERout,'r') as infile:
        for line in infile:
            try:
                # Note that different versions on CLIPper will report the gene name differently. So, we must handle this.
                chrom,start,end,name,stats,strand,start_2,end_2 = line.strip().split('\t')
                if CLIPPERoutNameDelim=='_':
                    readPerCluster=name.strip().split(CLIPPERoutNameDelim)[2]
                else:
                    readPerCluster=(name.strip().split(CLIPPERoutNameDelim)[1]).split('_')[2]
                geneName=name.strip().split(CLIPPERoutNameDelim)[0]
                f.write('\t'.join((chrom,start,end,name,stats,strand))+'\n')
                g.write((readPerCluster+'\n'))
                h.write((geneName+'\n'))
            except:
                # Malformed cluster lines are skipped silently.
                print ""
    f.close()
    g.close()
    h.close()
    # Intersect input reads with the CLIPper windows, report full result for both, include strand, do not duplicate reads from -a if they interset with multiple windows.
    clusterWindowInt=CLIPperOutBed.replace('.bed','_fullClusterWindow.bed')
    outfh=open(clusterWindowInt,'w')
    proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-wb','-s'],stdout=outfh)
    proc.communicate()
    outfh.close()
    # Use the full window intersection to make a dictionary mapping RTstop to gene name.
    nameDict=makeGeneNameDict(clusterWindowInt)
    # Intersect input reads with CLIPper windows, but only report one intersection per read (as reads can overlap with multiple windows).
    clusterWindowIntUniq=CLIPperOutBed.replace('.bed','_oneIntPerRead.bed')
    outfh=open(clusterWindowIntUniq,'w')
    proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-s','-u'],stdout=outfh)
    proc.communicate()
    outfh.close()
    # Process the uniquly intersected RT stops by adding gene name.
    CLIPPERlowFDR=CLIPperOutBed.replace('.bed','_lowFDRreads.bed')
    outfh=open(CLIPPERlowFDR,'w')
    with open(clusterWindowIntUniq, 'r') as infile:
        for read in infile:
            bed=read.strip().split('\t')
            RT_id='_'.join((bed[0],bed[1],bed[2],bed[5]))
            geneName=nameDict[RT_id]
            # Joining '\n' as a field leaves a trailing tab before the newline.
            outfh.write('\t'.join((bed[0],bed[1],bed[2],geneName,bed[4],bed[5],'\n')))
    outfh.close()
    infile.close()
    return (CLIPPERlowFDR,CLIPpeReadsPerCluster,CLIPpeGeneList,CLIPperOutBed)
print "Run CLIPper."
logOpen.write("Run CLIPper.\n")
# Run CLIPper on the merged RT stops, then post-process its windows.
CLIPPERio=runCLIPPER(negAndPosMerged,genomeForCLIPper,genomeFile)
CLIPPERin=CLIPPERio[0]
CLIPPERout=CLIPPERio[1]
clipperStats=modCLIPPERout(CLIPPERin,CLIPPERout)
CLIPPERlowFDR=clipperStats[0] # Low FDR reads returned filtered through CLIPper windows
CLIPpeReadsPerCluster=clipperStats[1] # Number of reads per CLIPper cluster
CLIPpeGeneList=clipperStats[2] # Gene names returned from the CLIPper file
CLIPperOutBed=clipperStats[3] # CLIPper windows as a bed file
# <codecell>
def getBedCenterPoints(inBed):
    # Usage: Obtain center coordinates of each record in a bed file.
    # Input: BedFile whose intervals were built by expanding +/- the
    # module-level `expand` around an RT stop.
    # Output: Path to a bed file of 1-bp center coordinates.
    outBed = inBed.replace('.bed', '_centerCoord.bed')
    with open(inBed, 'r') as infile:
        out = open(outBed, 'w')
        for line in infile:
            fields = line.strip().split('\t')
            center = int(fields[1]) + expand
            out.write('\t'.join((fields[0], str(center), str(center + 1), fields[3], fields[4], fields[5], '\n')))
        out.close()
    return outBed
def cleanBedFile(inBed):
    # Usage: Keep only the first 6 bed fields, then coordinate-sort the result.
    # Input: Bed file (possibly with extra columns).
    # Output: Path to the sorted, 6-field *_cleaned_sorted.bed file.
    program='sortBed'
    trimmedBed=inBed.replace('.bed','_cleaned.bed')
    sortedBed=trimmedBed.replace('_cleaned.bed','_cleaned_sorted.bed')
    dst=open(trimmedBed, 'w')
    with open(inBed, 'r') as src:
        for record in src:
            fields=record.strip().split('\t')
            dst.write('\t'.join((fields[0],fields[1],fields[2],fields[3],fields[4],fields[5],'\n')))
    dst.close()
    # sortBed emits the coordinate-sorted bed on stdout; capture it in sortedBed.
    with open(sortedBed, 'w') as outfh:
        subprocess.Popen([program, '-i', trimmedBed],stdout=outfh).communicate()
    return sortedBed
def makeBedGraph(lowFDRreads,sizesFile):
    # Usage: From a bed file, generate a bedGraph and a bigWig.
    # Input: Bed file of reads and a chromosome-sizes file.
    # Output: Path to the bedGraph (the .bw bigWig is written alongside it).
    program='genomeCoverageBed'
    program2=os.getcwd() + '/bin/bedGraphToBigWig'
    cleanBed=cleanBedFile(lowFDRreads)
    outname=cleanBed.replace('.bed','.bedgraph')
    outname2=cleanBed.replace('.bed','.bw')
    # genomeCoverageBed reports coverage on stdout; redirect it into the bedGraph.
    outfh=open(outname,'w')
    proc=subprocess.Popen([program,'-bg','-split','-i',cleanBed,'-g',sizesFile],stdout=outfh)
    proc.communicate()
    outfh.close()  # Fix: the handle was previously leaked (never closed).
    # bedGraphToBigWig writes outname2 itself; the original's open(outname2,'w')
    # handle was unused and never closed, so it has been removed.
    proc2=subprocess.Popen([program2,outname,sizesFile,outname2],stdout=subprocess.PIPE)
    proc2.communicate()
    return outname
# --- Coverage tracks for the low-FDR reads and for their center coordinates. ---
print "Make bedGraph"
logOpen.write("Make bedGraph.\n")
bedGraphCLIPout=makeBedGraph(CLIPPERlowFDR,genomeFile)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
# <codecell>
def filterSnoRNAs(proteinCodingReads,snoRNAmasker,miRNAmasker):
    # Usage: Filter snoRNA and miRNAs from protein coding reads.
    # Input: .bed file with protein coding reads plus the two masker bed files.
    # Output: Path to the snoRNA- and miRNA-filtered .bed file.
    program='intersectBed'
    noSnoReads=proteinCodingReads.replace('.bed','_snoRNAremoved.bed')
    noMirReads=noSnoReads.replace('.bed','_miRNAremoved.bed')
    # Two sequential strand-specific subtractions (-v keeps non-overlapping reads).
    for source,mask,target in ((proteinCodingReads,snoRNAmasker,noSnoReads),
                               (noSnoReads,miRNAmasker,noMirReads)):
        with open(target, 'w') as outfh:
            subprocess.Popen([program,'-a',source,'-b',mask,'-v','-s'],stdout=outfh).communicate()
    return (noMirReads)
def getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists):
    # Usage: For each gene-list file, extract all low-FDR reads whose records
    #        mention one of the listed genes.
    # Input: Low-FDR read bed file and a list of gene-list file paths.
    # Output: List of per-gene-list read files (one *_LowFDRreads.bed each).
    lowFDRgenelist=[]
    for path in pathToGeneLists:
        outfile=path+'_LowFDRreads.bed'
        # NOTE: shell=True with interpolated paths is safe only because these
        # paths are pipeline-generated, not user-supplied.
        proc=subprocess.Popen('grep -F -f %s %s > %s'%(path,CLIPPERlowFDR,outfile),shell=True)
        proc.communicate()  # Fix: communicate() already waits; the redundant wait() was removed.
        lowFDRgenelist.append(outfile)
    return lowFDRgenelist
def compareLists(list1,list2,outname):
    # Usage: Compare two gene-list files and write the shared lines to a
    #        'clipGenes_<category>' file under the module-level outfilepath.
    # Input: Two gene-list paths; outname's second dot-field names the category.
    # Output: Path of the file containing the matching genes.
    # Fix: both input handles were previously leaked; 'with' closes them.
    with open(list1,'r') as f, open(list2,'r') as g:
        commonGenes=set(f.readlines()) & set(g.readlines())
    geneCategory=outname.split('.')[1]
    outputName=outfilepath+'clipGenes_'+geneCategory
    outfh=open(outputName,'w')
    for gene in commonGenes:
        outfh.write(gene)
    outfh.close()
    return outputName
def getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot):
    # Usage: Get all genes listed under each type, compared to CLIPper targets.
    # Input: CLIPper gene-list path and the list of per-type annotation paths.
    # Output: One output path per gene type, as produced by compareLists.
    return [compareLists(CLIPpeGeneList,genepath,os.path.split(genepath)[1])
            for genepath in geneAnnot]
# --- Partition low-FDR reads by gene type; filter ncRNA contaminants from mRNA sets. ---
print "Partition reads by type."
logOpen.write("Partition reads by type.\n")
pathToGeneLists=getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot)
pathToReadLists=getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists)
proteinCodingReads=outfilepath+'clipGenes_proteinCoding_LowFDRreads.bed'
proteinBedGraph=makeBedGraph(proteinCodingReads,genomeFile)
# Remove snoRNA/miRNA-overlapping read centers before downstream mRNA analyses.
filteredProteinCodingCenters=filterSnoRNAs(getBedCenterPoints(proteinCodingReads),snoRNAmasker,miRNAmasker)
filteredProteinCentersBedGraph=makeBedGraph(filteredProteinCodingCenters,genomeFile)
lincRNAReads=outfilepath+'clipGenes_lincRNA_LowFDRreads.bed'
filteredLincRNACenters=filterSnoRNAs(getBedCenterPoints(lincRNAReads),snoRNAmasker,miRNAmasker)
# <codecell>
# --- #
# <codecell>
def sortFilteredBed(bedFile):
    # Load a filtered 6-column bed file and return per-gene read counts
    # as computed by countHitsPerGene.
    frame=pd.DataFrame(pd.read_table(bedFile,header=None))
    frame.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
    return countHitsPerGene(frame)
def countHitsPerGene(bf):
    # Usage: Count reads per gene from a bed DataFrame whose CLIPper_name
    #        column encodes the gene as '<gene>_<cluster>'.
    # Output: pandas Series of read counts per gene, descending.
    # *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
    bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
    geneCounts=bf.groupby('geneName').size()
    # Fix: Series.sort() was removed from pandas; sort_values reproduces the
    # same descending ordering the original in-place sort produced.
    geneCounts=geneCounts.sort_values(ascending=False)
    return geneCounts
def getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex):
    # Strand-specific intersection of read centers with the snoRNA index,
    # reporting both the read (-wa) and the snoRNA annotation (-wb).
    # Writes into the module-level outfilepath directory.
    program='intersectBed'
    bedFile=outfilepath+'clipGenes_snoRNA_LowFDRreads.bed'
    with open(bedFile, 'w') as outfh:
        subprocess.Popen([program,'-a',CLIPPERlowFDRcenters,'-b',snoRNAindex,'-s','-wa','-wb'],stdout=outfh).communicate()
    return bedFile
def countSnoRNAs(bedFile_sno):
    # Usage: Count reads per snoRNA from the 12-column file made by getSnoRNAreads.
    # Output: pandas Series of read counts per snoRNA, descending.
    bf=pd.DataFrame(pd.read_table(bedFile_sno,header=None))
    bf.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
    geneCounts=bf.groupby('name_snoRNA').size()
    # Fix: Series.sort() was removed from pandas; sort_values preserves behavior.
    geneCounts=geneCounts.sort_values(ascending=False)
    return geneCounts
def countRemainingGeneTypes(remaining):
for bedFile in remaining:
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
head,fname=os.path.split(bedFile)
geneType=fname.split("_")[1]
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_%s'%geneType
geneCounts.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
# --- Write per-gene read-count tables (PlotData_ReadsPerGene_*) for each gene type. ---
print "Generate sorted gene lists by gene type."
logOpen.write("Generate sorted gene lists by gene type.\n")
bedFile_pc=outfilepath+"clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_pc=sortFilteredBed(bedFile_pc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_proteinCoding'
geneCounts_pc.to_csv(outfilepathToSave)
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_linc=sortFilteredBed(bedFile_linc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_lincRNA'
geneCounts_linc.to_csv(outfilepathToSave)
# NOTE(review): CLIPPERlowFDRcenters and its bedGraph were already computed above;
# this recomputation appears to be a notebook-conversion artifact.
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
bedFile_sno=getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex)
geneCounts_sno=countSnoRNAs(bedFile_sno)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_snoRNA'
geneCounts_sno.to_csv(outfilepathToSave)
# All gene types other than lincRNA/proteinCoding/snoRNA get generic handling.
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
countRemainingGeneTypes(remaining)
# <codecell>
def makeClusterCenter(windowsFile):
    # Usage: Generate a file of cluster centers.
    # Input: Raw CLIPper output file.
    # Output: File with the 1-base center coordinate of each CLIPper cluster.
    cleanBed=cleanBedFile(windowsFile)
    centers=cleanBed.replace('.bed','.clusterCenter')
    with open(cleanBed, 'r') as src:
        dst=open(centers, 'w')
        for record in src:
            fields=record.strip().split('\t')
            # Expression kept verbatim so py2 floor-division semantics are preserved.
            halfWidth=abs(int((int(fields[1])-int(fields[2]))/2))
            midpoint=int(fields[1])+halfWidth
            dst.write(fields[0]+'\t'+str(midpoint)+'\t'+str(midpoint+1)+'\n')
        dst.close()
    return centers
def getClusterIntensity(bedGraph,centerCoordinates):
    # Usage: Generate a matrix of read intensity values around each CLIPper
    #        cluster center (written by the perl helper; later passed into R).
    # Input: BedGraph and cluster-center file.
    program=os.getcwd() + '/bin/grep_chip-seq_intensity.pl'
    proc=subprocess.Popen(['perl',program, centerCoordinates, bedGraph],)
    proc.communicate()  # Blocks until the perl script exits.
    logOpen.write("Waiting for Cluster Intensity file completion...\n")
    # Fix: the original then ran the shell builtin 'wait' in a fresh shell; that
    # shell has no children, so the call was a no-op and has been removed.
print "Get binding intensity around cluster centers."
logOpen.write("Get binding intensity around cluster centers.\n")
# Coverage of the reads fed INTO CLIPper, sampled around each cluster center.
bedGraphCLIPin=makeBedGraph(CLIPPERin,genomeFile)
centerCoordinates=makeClusterCenter(CLIPperOutBed)
getClusterIntensity(bedGraphCLIPin,centerCoordinates)
# <codecell>
def partitionReadsByUTR(infile,UTRmask,utrReads,notutrReads):
    # Split infile into reads that overlap UTRmask (-u) and reads that do not
    # (-v), strand-specifically, writing each partition to its own bed file.
    program = 'intersectBed'
    for flag,target in (('-u',utrReads),('-v',notutrReads)):
        with open(target,'w') as outfh:
            subprocess.Popen([program,'-a',infile,'-b',UTRmask,flag,'-s'],stdout=outfh).communicate()
def extractUTRs(bedIn,fivePUTRBed,threePUTRBed,cdsBed):
    # Usage: Extract all UTR-specific reads from the input file.
    # Input: .bed file plus 5'UTR, 3'UTR and CDS mask bed files.
    # Output: Mutually exclusive partitions of the input, in the order
    #         (5p, not5p, cds, notCds, 3p, not3p).
    fivePreads = bedIn.replace('.bed', '_5p.bed')
    notFivePreads = bedIn.replace('.bed', '_NOT5p.bed')
    threePreads = bedIn.replace('.bed', '_3p.bed')
    notThreePreads = bedIn.replace('.bed', '_NOT3p.bed')
    CDSreads = bedIn.replace('.bed', '_cds.bed')
    notCDSreads = bedIn.replace('.bed', '_NOTcds.bed')
    # Successive subtraction: 5'UTR first, then 3'UTR from the remainder,
    # then CDS from what is left.
    partitionReadsByUTR(bedIn,fivePUTRBed,fivePreads,notFivePreads)
    partitionReadsByUTR(notFivePreads,threePUTRBed,threePreads,notThreePreads)
    partitionReadsByUTR(notThreePreads,cdsBed,CDSreads,notCDSreads)
    return (fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads)
# --- Partition protein-coding read centers into 5'UTR / CDS / 3'UTR and count per gene. ---
print "Intron and UTR analysis."
logOpen.write("Intron and UTR analysis.\n")
fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads=extractUTRs(filteredProteinCodingCenters,fivePUTRBed,threePUTRBed,cdsBed)
geneCounts_5p=sortFilteredBed(fivePreads)
geneCounts_3p=sortFilteredBed(threePreads)
geneCounts_cds=sortFilteredBed(CDSreads)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_5pUTR'
geneCounts_5p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_3pUTR'
geneCounts_3p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_CDS'
geneCounts_cds.to_csv(outfilepathToSave)
# <codecell>
def makeTab(bedGraph,genesFile,sizesFile):
    # Usage: Convert a bedGraph to per-gene tab format via bedGraph2tab.pl.
    # Input: bedGraph path plus the gene and chromosome-size annotation files.
    # Output: Path to the .tab file written by the perl helper.
    program = os.getcwd() + '/bin/bedGraph2tab.pl'
    outfile=bedGraph.replace('.bedgraph','.tab')
    proc = subprocess.Popen(['perl',program,genesFile,sizesFile,bedGraph,outfile],)
    proc.communicate()  # Blocks until the perl script exits.
    # Fix: the subsequent shell-builtin 'wait' in a fresh shell was a no-op
    # (that shell has no children) and has been removed.
    return outfile
def makeAvgGraph(bedGraph,utrFile,genesFile,sizesFile):
    # Usage: Generate a matrix of read intensity values across the gene body.
    # Input: BedGraph plus UTR, gene and chromosome-size annotation files.
    # Output: The perl helper writes two matrices next to the .tab file.
    program= os.getcwd() + '/bin/averageGraph_scaled_tab.pl'
    tabFile=makeTab(bedGraph,genesFile,sizesFile)
    outhandle=tabFile.replace('.tab','_UTRs')
    proc = subprocess.Popen(['perl',program,utrFile,tabFile,tabFile,outhandle],)
    proc.communicate()  # Blocks until the perl script exits.
    # Fix: the subsequent shell-builtin 'wait' in a fresh shell was a no-op
    # (that shell has no children) and has been removed.
print "Gene body analysis."
logOpen.write("Gene body analysis.\n")
# Average read-density profile across scaled mRNA gene bodies (perl helpers).
bedGraphProtein=makeBedGraph(bedFile_pc,genomeFile)
makeAvgGraph(bedGraphProtein,utrFile,genesFile,sizesFile)
# <codecell>
def getGeneStartStop(bedFile,geneRef):
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','End','Strand']]
outfilepathToSave=bedFile.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "ncRNA gene body anaysis."
# Reference table of gene start/stop coordinates shipped with the pipeline.
geneStartStopRepo=os.getcwd()+'/docs/all_genes.txt'
geneRef=pd.DataFrame(pd.read_table(geneStartStopRepo))
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
for bedFile in remaining:
    # NOTE(review): getGeneStartStop returns None (it writes to disk); st_stop is unused.
    st_stop=getGeneStartStop(bedFile,geneRef)
# lincRNA file processing
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
bf=pd.DataFrame(pd.read_table(bedFile_linc,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','Stop','Strand']]
outfilepathToSave=bedFile_linc.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
    # Usage: Load the repeat pseudo-genome sequence and its annotation table.
    # Input: Path to the repeat genome (two-line header+sequence file) and the
    #        tab-separated table of element positions within it.
    # Output: (sequence string, annotation DataFrame with End_for_extraction).
    # Fix: dtype=str replaces the py2-numpy-only 'string' alias (identical
    # behavior on py2, and also valid on py3/modern numpy); the unused
    # repeat_genome_size local was removed.
    repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype=str)
    repeat_genome_bases=repeat_genome[1]
    repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
    repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
    repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
    return (repeat_genome_bases,repeatAnnotDF)
def readBed(path):
    # Load a 6-column bed file; every field stays a string except Start,
    # which is cast to int for arithmetic downstream.
    frame = pd.read_table(path,dtype=str,header=None)
    frame.columns=['Index','Start','Stop','Name','QS','Strand']
    frame['Start']=frame['Start'].astype(int)
    return frame
print "Record repeat RNA."
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
repeatAnnotDF.set_index('Name',inplace=True,drop=False)
# Get merged data for repeat index.
repeatMerged=glob.glob(outfilepath+"*repeat_allreads.mergedRT.bed")
rep=pd.read_table(repeatMerged[0],dtype=str,header=None)
rep.columns=['Rep_index','Start','Stop','Read_name','Q','Strand']
# RT stop position = read start plus the module-level 'expand' offset.
rep['RT_stop']=rep['Start'].astype(int)+expand
for ix in repeatAnnotDF.index:
    # NOTE(review): 'end' is assigned but unused; the loop re-reads IndexEnd below.
    end=repeatAnnotDF.loc[ix,'IndexEnd']
    repName=repeatAnnotDF.loc[ix,'Name']
    # Reads whose RT stop falls strictly inside this repeat element's index range.
    gene_hits=rep[(rep['RT_stop']<int(repeatAnnotDF.loc[ix,'IndexEnd']))&(rep['RT_stop']>int(repeatAnnotDF.loc[ix,'IndexStart']))]
    # NOTE(review): assigning columns on a filtered slice triggers pandas
    # SettingWithCopy warnings; works here since gene_hits is only written to csv.
    gene_hits['Repeat_End']=repeatAnnotDF.loc[ix,'IndexEnd']
    gene_hits['Repeat_Start']=repeatAnnotDF.loc[ix,'IndexStart']
    outfilepathToSave=outfilepath + '/PlotData_RepeatRNAreads_%s'%repName
    gene_hits.to_csv(outfilepathToSave)
# <codecell>
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
    # NOTE(review): exact duplicate of the makeRepeatAnnotation defined earlier
    # (a notebook-conversion artifact); kept so cell-by-cell execution still works.
    # Usage: Load the repeat pseudo-genome sequence and its annotation table.
    # Output: (sequence string, annotation DataFrame with End_for_extraction).
    # Fix: dtype=str replaces the py2-numpy-only 'string' alias (identical
    # behavior on py2, valid on py3); unused repeat_genome_size local removed.
    repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype=str)
    repeat_genome_bases=repeat_genome[1]
    repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
    repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
    repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
    return (repeat_genome_bases,repeatAnnotDF)
# NOTE(review): duplicate reload of the repeat annotation (already computed above);
# retained for notebook-cell compatibility.
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
# <codecell>
def lineCount(filename):
    # Usage: Return the number of lines in a file.
    # Fix: the original returned i+1 with i initialized to 0, which reported 1
    # for an EMPTY file; enumerate(f,1) makes the empty-file count 0.
    count=0
    with open(filename) as f:
        for count,_ in enumerate(f,1):
            pass
    return count
def plot_ReadAccounting(outfilepath,sampleName):
    # Figure 1 panel: horizontal bar chart of log10 read counts surviving each
    # pipeline stage, plus a PlotData_ReadsPerPipeFile table. Reconstructs the
    # intermediate file names from the module-level infilepath, index_tag and
    # threshold values.
    rawRead1=infilepath+sampleName+'_R1.fastq'
    rawRead2=infilepath+sampleName+'_R2.fastq'
    reads3pTrim=[outfilepath+sampleName+'_R1_3ptrimmed.fastq',outfilepath+sampleName+'_R2_3ptrimmed.fastq']
    readsFilter=[outfilepath+sampleName+'_R1_3ptrimmed_filter.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter.fastq']
    readsNoDupes=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe.fastq']
    readsMappedReapeat=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed']
    readsMappedHg19=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
    # NOTE(review): identical to readsMappedHg19 — presumably should point at the
    # blacklist-filtered files; confirm intended file names.
    readsMappedBlacklist=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
    readsMappedRepeatMask=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag]
    clipperIN=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIPPERin.bed'%(threshold,index_tag)
    clipperOUT=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_lowFDRreads.bed'%(threshold,index_tag)
    fileNames=['Raw (R1)','Raw (R2)','3p Trim (R1)','3p Trim (R2)','Filter (R1)','Filter (R2)','No dupes (R1)','No dupes (R2)','RepeatMapped (R1)','RepeatMapped (R2)','Hg19Mapped (R1)','Hg19Mapped (R2)','Blacklist (R1)','Blacklist (R2)','RepeatMask (R1)','RepeatMask (R2)','ClipperIn','ClipperOut']
    filesToCount=[rawRead1,rawRead2,reads3pTrim[0],reads3pTrim[1],readsFilter[0],readsFilter[1],readsNoDupes[0],readsNoDupes[1],readsMappedReapeat[0],readsMappedReapeat[1],readsMappedHg19[0],readsMappedHg19[1],readsMappedBlacklist[0],readsMappedBlacklist[1],readsMappedRepeatMask[0],readsMappedRepeatMask[1],clipperIN,clipperOUT]
    counts=[]
    counter=0
    for fileString in filesToCount:
        temp=lineCount(fileString)
        if counter < 8:
            temp=temp/4 # Fastq files: 4 lines per read (py2 integer division).
        counts=counts+[temp]
        counter += 1
    ind = np.arange(len(counts)) + 0.5
    plt.barh(ind,list(reversed(np.log10(np.array(counts)))),align='center',color='blue')
    plt.xlabel('log10(Counts per file)',fontsize=5)
    locs,pltlabels = plt.xticks(fontsize=5)
    plt.setp(pltlabels, rotation=90, fontsize=5)
    plt.yticks(ind,list(reversed(fileNames)),fontsize=5)
    plt.tick_params(axis='yticks',labelsize=5)
    ax=plt.gca()
    for line in ax.get_yticklines():
        line.set_markersize(0)
    plt.title('Read counts',fontsize=5)
    # Persist the raw counts alongside the plot.
    readDF=pd.DataFrame()
    readDF['File_name']=fileNames
    readDF['Reads_per_file']=counts
    outfilepathToSave=outfilepath + '/PlotData_ReadsPerPipeFile'
    readDF.to_csv(outfilepathToSave)
# Notebook artifact: draws panel 1 here; Figure 1 is assembled again further below.
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
# <codecell>
def plot_BoundGeneTypes(outfilepath,sampleName):
    # Figure 1 panel: bar chart of the number of bound genes per gene class,
    # plus a PlotData_ReadAndGeneCountsPerGenetype summary table.
    record=pd.DataFrame()
    # Exclude specific files (e.g., UTR-specific reads).
    geneListToPlot=[f for f in glob.glob(outfilepath+'PlotData_ReadsPerGene_*') if '5pUTR' not in f and '3pUTR' not in f and 'CDS' not in f]
    for boundGenes in geneListToPlot:
        glist=pd.read_csv(boundGenes,header=None)
        glist.columns=['GeneName','Count']
        # Gene-type label is the suffix of the PlotData file name.
        gName=boundGenes.split('_')[-1]
        record.loc[gName,'genesBound']=glist.shape[0]
        record.loc[gName,'totalReads']=glist['Count'].sum()
    # NOTE(review): DataFrame.sort was removed in later pandas;
    # sort_values('genesBound') is the modern equivalent.
    record.sort('genesBound',inplace=True)
    outfilepathToSave=outfilepath + '/PlotData_ReadAndGeneCountsPerGenetype'
    record.to_csv(outfilepathToSave)
    ind = np.arange(record.shape[0]) + 0.5
    plt.bar(ind,record['genesBound'],align='center',color='blue')
    locs,pltlabels = plt.yticks(fontsize=5)
    locs,pltlabels = plt.xticks(ind,record.index,fontsize=5)
    plt.setp(pltlabels, rotation=90, fontsize=5)
    plt.tick_params(axis='xticks',labelsize=5)
    ax=plt.gca()
    for line in ax.get_xticklines():
        line.set_markersize(0)
    plt.ylabel('Number of genes bound',fontsize=5)
    plt.tick_params(axis='yticks',labelsize=5)
    plt.title('Bound genes by class',fontsize=5)
# Notebook artifact: draws panel 6 here; Figure 1 is assembled again further below.
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
# <codecell>
def plot_ReadsPerCluster(outfilepath,sampleName):
    # Figure 1 panel: histogram of reads per CLIPper cluster. Uses the
    # module-level threshold and index_tag to locate the readsPerCluster file.
    readPerCluster=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters.readsPerCluster'%(threshold,index_tag)
    clust=pd.DataFrame(pd.read_table(readPerCluster,header=None))
    clust.columns=['ReadsPerCluster']
    clust=clust['ReadsPerCluster']
    interval=10
    bins=range(min(clust)-10,max(clust)+10,interval)
    hist,bins=np.histogram(clust,bins=bins)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    plt.bar(center, hist,align='center',width=width)
    locs,pltlabels = plt.yticks(fontsize=5)
    locs,pltlabels = plt.xticks(center,center,fontsize=5)
    plt.setp(pltlabels, rotation=90, fontsize=3.5)
    plt.tick_params(axis='yticks',labelsize=5)
    plt.xlabel('Reads per cluster (bin=%s)'%interval,fontsize=5)
    plt.ylabel('Frequency (RT stop count)',fontsize=5)
    plt.title('Reads per cluster',fontsize=5)
    plt.xlim(0,100) # Make the histogram easy to view.
    # plt.xlim(-interval,np.max(center)+interval)
# Notebook artifact: draws panel 2 here; Figure 1 is assembled again further below.
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
# <codecell>
def plot_ClusterSizes(outfilepath,sampleName):
    # Figure 1 panel: box plot of CLIPper cluster lengths (|start - end|).
    clipClusters=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters'%(threshold,index_tag)
    clust=pd.DataFrame(pd.read_table(clipClusters,header=None,skiprows=1))
    clust.columns=['chr','start','end','name','score','strand','m1','m2']
    # start-end is negative for well-formed intervals; fabs gives the length.
    clust['clusterSize']=clust['start']-clust['end']
    clust['clusterSize']=clust['clusterSize'].apply(lambda x: math.fabs(x))
    plt.boxplot(clust['clusterSize'])
    plt.tick_params(axis='x',labelbottom='off')
    ax=plt.gca()
    for line in ax.get_xticklines():
        line.set_markersize(0)
    plt.ylabel('Cluster length (bases)',fontsize=5)
    locs,pltlabels = plt.yticks(fontsize=5)
    plt.title('Cluster size',fontsize=5)
# Notebook artifact: draws panel 3 here; Figure 1 is assembled again further below.
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
# <codecell>
def plot_clusterBindingIntensity(outfilepath,sampleName):
    # Figure 1 panel: heatmap of read intensity around cluster centers, rows
    # (clusters) ordered by total signal. Reads the matrix produced by
    # getClusterIntensity / grep_chip-seq_intensity.pl.
    clusterCenterHeatmap=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_cleaned_sorted.clusterCenter_heatmap.txt'%(threshold,index_tag)
    hmap=pd.DataFrame(pd.read_table(clusterCenterHeatmap,header=None,skiprows=1))
    hmap_vals=hmap.ix[:,1:]
    # Order rows by total intensity so the strongest clusters plot together.
    sums=hmap_vals.sum(axis=1)
    hmap_vals=hmap_vals.loc[np.argsort(sums),:]
    plt.ylim(0,hmap_vals.shape[0])
    p=plt.pcolormesh(np.array(hmap_vals),cmap='Blues')
    plt.tick_params(axis='x',labelbottom='off')
    plt.xlabel('Cluster position',fontsize=5)
    locs,pltlabels = plt.yticks(fontsize=5)
    plt.ylabel('Cluster number',fontsize=5)
    plt.title('Read distribution',fontsize=5)
# Notebook artifact: draws panel 4 here; Figure 1 is assembled again further below.
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
# <codecell>
def readUTRfile(path):
    # Load a PlotData_ReadsPerGene_* csv as (Gene_name, Count) columns.
    counts=pd.read_csv(path,header=None)
    counts.columns=['Gene_name','Count']
    return counts
def plot_readsBymRNAregion(outfilepath,sampleName):
    # Figure 1 panel: pie chart of protein-coding reads split into intronic /
    # 5'UTR / CDS / 3'UTR fractions.
    # NOTE(review): draws on the module-level 'ax' (set via ax=plt.subplot(...)
    # just before each call), not on the current axes — fragile coupling.
    pc_5pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')['Count'].sum()
    pc_3pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')['Count'].sum()
    pc_CDSReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')['Count'].sum()
    non_intronic=pc_5pReads+pc_3pReads+pc_CDSReads
    allProteinCoding=outfilepath +'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed'
    all_pc=pd.DataFrame(pd.read_table(allProteinCoding,header=None))
    pc_allReads=all_pc.shape[0]
    # Remaining (non-UTR, non-CDS) reads are treated as intronic.
    v=[float(pc_allReads-non_intronic)/pc_allReads,float(pc_5pReads)/pc_allReads,float(pc_CDSReads)/pc_allReads,float(pc_3pReads)/pc_allReads]
    pie_wedges=ax.pie(v,labels=["Intronic","5p UTR","CDS","3pUTR"],labeldistance=1.1,autopct='%1.1f%%')
    plt.rcParams['font.size']=5
    for wedge in pie_wedges[0]:
        wedge.set_edgecolor('black')
        wedge.set_lw(1)
# Notebook artifact: sets the module-level 'ax' used inside plot_readsBymRNAregion.
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
# <codecell>
# --- Assemble and save Figure 1 (all six QC panels). ---
fig1=plt.figure(1)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
# 'ax' must be the module-level axes read by plot_readsBymRNAregion.
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
fig1.tight_layout()
fig1.savefig(outfilepath+'Figure1.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig1.savefig(outfilepath+'Figure1.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_mRNAgeneBodyDist(outfilepath,sampleName):
    # Figure 2 panel: average CLIP signal across the scaled mRNA gene body.
    # Reads the averageGraph matrix written by the perl helper; positions 200
    # and 400 mark the 5'UTR/CDS and CDS/3'UTR boundaries (dashed lines).
    averageGraph=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_averageGraph.txt'
    hmap=pd.DataFrame(pd.read_table(averageGraph,header=None,skiprows=1))
    hmap=hmap.set_index(0)
    avgTrace=hmap.loc['treat',:]
    plt.plot(avgTrace,color='blue',linewidth='2')
    plt.vlines(200,0,np.max(avgTrace),linestyles='dashed')
    plt.vlines(400,0,np.max(avgTrace),linestyles='dashed')
    plt.ylim(0,np.max(avgTrace))
    plt.tick_params(axis='x',labelbottom='off')
    plt.xlabel('mRNA gene body (5pUTR, CDS, 3pUTR)')
    plt.ylabel('Read density')
    plt.tick_params(axis='y',labelsize=5)
    plt.title('CLIP signal across average mRNA transcript.',fontsize=5)
# Notebook artifact: draws the gene-body panel here; Figure 2 is assembled below.
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
# <codecell>
def convertENBLids(enst_name):
    # Map an ENST transcript id to its ENSG gene id ('name2' column) via the
    # module-level ensemblGeneAnnot table, which is indexed by transcript name.
    return ensemblGeneAnnot.loc[enst_name,'name2']
def getUTRbindingProfile(utr,hmap_m):
    # Select genes whose binding signal is exclusive to one mRNA region, using
    # the 600-bin scaled gene-body matrix (bins 1-200 = 5'UTR, 201-400 = CDS,
    # 401-600 = 3'UTR, per the range tests below).
    if utr=='5p':
        # Signal only in bins 1-200 (5'UTR), none elsewhere.
        ix=(hmap_m[range(201,601)].sum(axis=1)==0)&(hmap_m[range(1,201)].sum(axis=1)>0)
        screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')
    elif utr=='3p':
        # Signal only in bins 401-600 (3'UTR).
        ix=(hmap_m[range(1,401)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)>0)
        screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')
    else:
        # Signal only in bins 201-400 (CDS).
        ix=(hmap_m[range(1,201)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)==0)&(hmap_m[range(201,401)].sum(axis=1)>0)
        screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')
    # Ensure all genes are also identified in pre-allocated gene lists.
    hmap_m_utrSpec=hmap_m.ix[ix,:]
    hmap_m_utrSpec_filter=pd.merge(hmap_m_utrSpec,screen,left_on='ENSG_ID',right_on='Gene_name',how='inner')
    # Order genes by total signal so heatmap rows rank by intensity.
    sums=hmap_m_utrSpec_filter[range(1,601)].sum(axis=1)
    hmap_m_utrSpec_filter=hmap_m_utrSpec_filter.loc[np.argsort(sums),:]
    return hmap_m_utrSpec_filter
def plot_geneBodyPartition(outfilepath,sampleName):
    # Figure 2 panels: per-region heatmaps of genes bound exclusively in the
    # 5'UTR, CDS or 3'UTR, plus PlotData_ExclusiveBound_* gene lists.
    treatMatrix=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_treatmatrix.txt'
    hmap=pd.DataFrame(pd.read_table(treatMatrix,header=None,skiprows=1))
    # Ensure genes recoverd from this analysis are indepdently indentified using partitioning of CLIPper cluster data.
    hmap['ENSG_ID']=hmap.ix[:,0].apply(convertENBLids)
    bound_pc = outfilepath+'clipGenes_proteinCoding'
    pc_genes=pd.DataFrame(pd.read_table(bound_pc,header=None,))
    pc_genes.columns=['ENSG_ID']
    hmap_m=pd.merge(hmap,pc_genes,left_on='ENSG_ID',right_on='ENSG_ID',how='inner')
    # Isolate intronic bound genes: bound per CLIPper but absent from the
    # gene-body matrix (no exonic signal).
    tosave=outfilepath+'PlotData_ExclusiveBound_Intronic'
    intronicBoundGenes=list(set(pc_genes['ENSG_ID'])-set(hmap_m['ENSG_ID']))
    np.savetxt(tosave,np.array(intronicBoundGenes),fmt="%s")
    # UTR specific genes.
    geneTypes=['5p','cds','3p']
    depth=50
    for i in range(0,3):
        utrMatrix=getUTRbindingProfile(geneTypes[i],hmap_m)
        tosave=outfilepath+'PlotData_ExclusiveBound_%s'%geneTypes[i]
        np.savetxt(tosave,utrMatrix['ENSG_ID'],fmt="%s")
        plt.subplot2grid((2,3),(1,i),colspan=1)
        dataToPlot=utrMatrix[range(1,601)]
        # Plot only the top 'depth' rows (highest-signal genes are at the bottom).
        p=plt.pcolormesh(np.array(dataToPlot)[-depth:-1,:],cmap='Blues')
        plt.title(geneTypes[i],fontsize=5)
        plt.vlines(200,0,depth,linestyles='dashed')
        plt.vlines(400,0,depth,linestyles='dashed')
        plt.tick_params(axis='x',labelbottom='off')
        plt.tick_params(axis='y',labelleft='off')
        plt.ylim(0,depth)
        plt.ylabel('Ranked genes (highest on bottom)',fontsize=5)
        plt.xticks(visible=False)
        plt.yticks(visible=False)
        plt.title('%s specific genes: %s'%(geneTypes[i],np.unique(utrMatrix['ENSG_ID']).shape[0]),fontsize=5)
# Build the ENST->ENSG lookup used by convertENBLids, then draw the partition panels.
ensemblGeneAnnot=pd.DataFrame(pd.read_table(genesFile))
ensemblGeneAnnot=ensemblGeneAnnot.set_index('name') # Make ENST the index
plot_geneBodyPartition(outfilepath,sampleName)
# <codecell>
# --- Assemble and save Figure 2 (gene-body distribution + UTR partition heatmaps). ---
fig2=plt.figure(2)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
plot_geneBodyPartition(outfilepath,sampleName)
fig2.tight_layout()
fig2.savefig(outfilepath+'Figure2.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig2.savefig(outfilepath+'Figure2.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_repeatRNA(outfilepath,sampleName):
    # Figure 3: one histogram of RT stops per repeat element (rDNA excluded;
    # it gets its own figure), plus PlotData_RepeatRNAHist_* tables with the
    # per-base sequence, raw counts and normalized counts.
    repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
    repeat_genome_bases=repeat_genome[1]
    repFiles=glob.glob(outfilepath + '/PlotData_RepeatRNAreads_*')
    repFiles=[repFile for repFile in repFiles if 'rDNA' not in repFile]
    # Square-ish grid of subplots, one per repeat element.
    plotDim=math.ceil(math.sqrt(len(repFiles)))
    i=0
    for path in repFiles:
        name=path.split('RepeatRNAreads_')[-1]
        try:
            # Read in each RT stop file
            hits_per_rep=pd.read_csv(path)
            RTpositions=hits_per_rep['RT_stop']
            start=hits_per_rep.loc[0,'Repeat_Start']
            end=hits_per_rep.loc[0,'Repeat_End']
            # Histogram of RT stops across gene body
            bins=range(start,end+2,1)
            hist,bins=np.histogram(RTpositions,bins=bins)
            width=0.7*(bins[1]-bins[0])
            center=(bins[:-1] + bins[1:])/2
            # Normalize
            histPlot=np.array(hist,dtype=float)
            histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
            # Subplot
            plt.subplot(plotDim,plotDim,i+1)
            plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
            plt.tick_params(axis='x',labelsize=2.5)
            plt.tick_params(axis='y',labelsize=2.5)
            plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
            plt.xlim(start,end)
            # Record data
            storageDF=pd.DataFrame()
            sequence=repeat_genome_bases[start:end+1]
            storageDF['Sequence']=pd.Series(list(sequence))
            readsPerBase=np.array(list(hist))
            readsPerBaseNorm=np.array(list(histPlot))
            storageDF['RT_stops']=readsPerBase
            storageDF['RT_stops_norm']=readsPerBaseNorm
            outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
            storageDF.to_csv(outfilepathToSave)
            i+=1
        # NOTE(review): bare except swallows ALL errors for this element, not
        # just "no reads"; consider narrowing to the expected exception types.
        except:
            print "No reads for repeatRNA %s"%name
    plt.tight_layout()
# --- Assemble and save Figure 3 (repeat RNA histograms). ---
fig3=plt.figure(3)
plot_repeatRNA(outfilepath,sampleName)
fig3.tight_layout()
fig3.savefig(outfilepath+'Figure3.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig3.savefig(outfilepath+'Figure3.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_rDNA(outfilepath,sampleName):
    # Figure 4: RT-stop distribution across the rDNA locus — full locus (top),
    # transcribed region (middle) and 18s/5.8s/28s close-ups (bottom row).
    # Relies on module-level repeat_genome_bases and the start/end 18s/5s/28s
    # and rRNAend coordinates defined elsewhere in the file.
    plt.subplot2grid((3,3),(0,0),colspan=3)
    name='rDNA'
    rDNA=glob.glob(outfilepath + 'PlotData_RepeatRNAreads_rDNA')
    hits_per_rep=pd.read_csv(rDNA[0])
    RTpositions=hits_per_rep['RT_stop']
    start=hits_per_rep.loc[0,'Repeat_Start']
    end=hits_per_rep.loc[0,'Repeat_End']
    # Per-base histogram, normalized by the total RT-stop count.
    bins=range(start,end+2,1)
    hist,bins=np.histogram(RTpositions,bins=bins)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1]+bins[1:])/2
    histPlot=np.array(hist,dtype=float)
    histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
    plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
    plt.tick_params(axis='x',labelsize=2.5)
    plt.tick_params(axis='y',labelsize=2.5)
    plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
    plt.xlim(start,end)
    # Record data
    storageDF=pd.DataFrame()
    sequence=repeat_genome_bases[start:end+1]
    storageDF['Sequence']=pd.Series(list(sequence))
    readsPerBase=np.array(list(hist))
    readsPerBaseNorm=np.array(list(histPlot))
    storageDF['RT_stops']=readsPerBase
    storageDF['RT_stops_norm']=readsPerBaseNorm
    outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
    storageDF.to_csv(outfilepathToSave)
    # Features of rDNA with respect to start of the bowtie index (index=0)
    rRNAstart=start
    plt.axvspan(start18s+rRNAstart,end18s+rRNAstart,facecolor='g',alpha=0.5)
    plt.axvspan(start5s+rRNAstart,end5s+rRNAstart,facecolor='r',alpha=0.5)
    plt.axvspan(start28s+rRNAstart,end28s+rRNAstart,facecolor='b',alpha=0.5)
    # Generate histogram for transcribed region
    plt.subplot2grid((3,3),(1,0),colspan=3)
    datarDNAOnly=RTpositions-start
    bins=range((start-start),(end-start+2),1)
    hist,bins=np.histogram(datarDNAOnly,bins=bins)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    histPlot=np.array(hist,dtype=float)
    histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
    plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
    plt.tick_params(axis='x',labelsize=2.5)
    plt.tick_params(axis='y',labelsize=2.5)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
    plt.axvspan(start18s,end18s,facecolor='g',alpha=0.5)
    plt.axvspan(start5s,end5s,facecolor='r',alpha=0.5)
    plt.axvspan(start28s,end28s,facecolor='b',alpha=0.5)
    plt.xlim(0,rRNAend)
    # Individual regions
    plt.subplot2grid((3,3),(2,0),colspan=1)
    plt.bar(center,histPlot,align='center',width=width,color='green',alpha=0.75)
    plt.xlim(start18s,end18s)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.title('18s Region',fontsize=5)
    plt.subplot2grid((3,3),(2,1),colspan=1)
    plt.bar(center,histPlot,align='center',width=width,color='red',alpha=0.75)
    plt.xlim(start5s,end5s)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.title('5.8s Region',fontsize=5)
    plt.subplot2grid((3,3),(2,2),colspan=1)
    plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.75)
    plt.xlim(start28s,end28s)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.title('28s Region',fontsize=5)
    plt.tight_layout()
# --- Assemble and save Figure 4 (rDNA locus RT-stop profiles). ---
fig4=plt.figure(4)
plot_rDNA(outfilepath,sampleName)
fig4.tight_layout()
fig4.savefig(outfilepath+'Figure4.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig4.savefig(outfilepath+'Figure4.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getBindingFrac(type_specific):
    """Compute, per read, the fractional position (5' -> 3') within the
    host snoRNA gene body.

    Parameters
    ----------
    type_specific : DataFrame with columns 'strand_snoRNA', 'Start',
        'Start_snoRNA' and 'Stop_snoRNA' (one row per read).

    Returns
    -------
    DataFrame : the same rows with added 'diff' (absolute distance of the
    read start from the snoRNA 5' end) and 'frac' (diff normalised by the
    snoRNA gene length) columns.
    """
    # Work on explicit copies: the boolean-mask selections below are
    # slices, and assigning new columns on a slice triggers pandas'
    # chained-assignment ambiguity (SettingWithCopyWarning).
    # 5' position on the negative strand is the snoRNA stop coordinate.
    neg_data = type_specific[type_specific['strand_snoRNA'] == '-'].copy()
    neg_data['diff'] = np.abs(neg_data['Stop_snoRNA'] - neg_data['Start'])
    neg_data['frac'] = neg_data['diff'] / (neg_data['Stop_snoRNA'] - neg_data['Start_snoRNA'])
    # 5' position on the positive strand is the snoRNA start coordinate.
    pos_data = type_specific[type_specific['strand_snoRNA'] == '+'].copy()
    pos_data['diff'] = np.abs(pos_data['Start_snoRNA'] - pos_data['Start'])
    pos_data['frac'] = pos_data['diff'] / (pos_data['Stop_snoRNA'] - pos_data['Start_snoRNA'])
    DF_snoProfile = pd.concat([neg_data, pos_data])
    return DF_snoProfile
print "snoRNA gene body anaysis."
# logOpen.write("Gene body analysis.\n")
# Reads overlapping snoRNAs, annotated with the host snoRNA coordinates.
bf_sno=pd.read_table(outfilepath+"clipGenes_snoRNA_LowFDRreads.bed",header=None)
bf_sno.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
# Read counts and fractions per snoRNA class.
snoTypes=pd.DataFrame(bf_sno.groupby('Type').size())
snoTypes.columns=['Reads']
snoTypes['Fraction']=snoTypes['Reads']/snoTypes['Reads'].sum(axis=1)
outfilepathToSave=outfilepath+'/PlotData_readsPerSnoRNAType'
snoTypes.to_csv(outfilepathToSave)
# Per-gene read counts within each snoRNA class.
snoTypesAndGenes=pd.DataFrame(bf_sno.groupby(['Type','name_snoRNA']).size())
snoTypesAndGenes.columns=['Count_per_gene']
outfilepathToSave=outfilepath+'/PlotData_geneStatsPerSnoRNAType'
snoTypesAndGenes.to_csv(outfilepathToSave)
# Figure 5: pie chart of read fractions plus one binding-profile
# histogram per snoRNA class.
fig5=plt.figure(5)
ax=plt.subplot(2,2,1)
pie_wedges=ax.pie(snoTypes['Fraction'],labels=snoTypes.index,labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
    wedge.set_edgecolor('black')
    wedge.set_lw(1)
i=2
for sType in set(bf_sno['Type']):
    type_specific=bf_sno[bf_sno['Type']==sType]
    sno_profile=getBindingFrac(type_specific)
    # Human-readable class name for the plot title.
    if sType=='C':
        title="C/D_box"
    elif sType=='H':
        title="H/ACA_box"
    else:
        title="scaRNA"
    outfilepathToSave=outfilepath+'/PlotData_snoRNAReadDist_%s'%sType
    sno_profile.to_csv(outfilepathToSave)
    # Histogram of the fractional read position along the gene body,
    # normalised by the number of reads in this class.
    plt.subplot(2,2,i)
    bins=np.arange(0,1,0.01)
    hist,bins=np.histogram(sno_profile['frac'],bins=bins)
    hist=np.array(hist/float(sno_profile['frac'].shape[0]),dtype=float)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
    plt.title('Binding profile for %s'%title,fontsize=5)
    plt.xlim([0,1])
    # Record data
    storageDF=pd.DataFrame()
    storageDF['bins']=pd.Series(bins)
    storageDF['hist']=pd.Series(hist)
    outfilepathToSave=outfilepath+'/PlotData_snoRNAhistogram_%s'%sType
    storageDF.to_csv(outfilepathToSave)
    i+=1
fig5.tight_layout()
fig5.savefig(outfilepath+'Figure5.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig5.savefig(outfilepath+'Figure5.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getncRNABindingFrac(type_specific):
    """Compute, per read, the fractional position (5' -> 3') within the
    host ncRNA gene body.

    Parameters
    ----------
    type_specific : DataFrame with columns 'Strand', 'RT_stop',
        'Gene Start (bp)' and 'Gene End (bp)' (one row per read).

    Returns
    -------
    DataFrame : the same rows with added 'diff' (absolute distance of the
    RT stop from the gene 5' end) and 'frac' (diff normalised by the gene
    length) columns.
    """
    # Explicit copies avoid pandas' chained-assignment ambiguity
    # (SettingWithCopyWarning) when the new columns are added.
    # 5' position on the negative strand is the gene stop coordinate.
    neg_data = type_specific[type_specific['Strand'] == '-'].copy()
    neg_data['diff'] = np.abs(neg_data['Gene End (bp)'] - neg_data['RT_stop'])
    neg_data['frac'] = neg_data['diff'] / (neg_data['Gene End (bp)'] - neg_data['Gene Start (bp)'])
    # 5' position on the positive strand is the gene start coordinate.
    pos_data = type_specific[type_specific['Strand'] == '+'].copy()
    pos_data['diff'] = np.abs(pos_data['Gene Start (bp)'] - pos_data['RT_stop'])
    pos_data['frac'] = pos_data['diff'] / (pos_data['Gene End (bp)'] - pos_data['Gene Start (bp)'])
    DF_ncRNAProfile = pd.concat([neg_data, pos_data])
    return DF_ncRNAProfile
print "ncRNA gene body anaysis."
# One *.geneStartStop file per ncRNA class; rRNA is handled separately.
st_stopFiles=glob.glob(outfilepath+"*.geneStartStop")
st_stopFiles=[f for f in st_stopFiles if 'rRNA' not in f]
# Figure 6: one binding-profile histogram per ncRNA class, laid out on a
# square grid large enough for all files.
fig6=plt.figure(6)
plotDim=math.ceil(math.sqrt(len(st_stopFiles)))
i=1
for st_file in st_stopFiles:
    # Class name is embedded in the file name between the two markers.
    name=st_file.split('clipGenes_')[1].split('_LowFDRreads')[0]
    tmp=pd.read_csv(st_file)
    # RT stop from 'Start' plus 'expand' -- presumably compensates a
    # window padding applied upstream; TODO confirm against the caller.
    tmp['RT_stop']=tmp['Start']+expand
    tmp_profile=getncRNABindingFrac(tmp)
    plt.subplot(plotDim,plotDim,i)
    bins=np.arange(0,1,0.01)
    hist,bins=np.histogram(tmp_profile['frac'],bins=bins)
    # Normalise by the number of reads in this class.
    hist=np.array(hist/float(tmp_profile['frac'].shape[0]),dtype=float)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
    plt.title('Binding profile for %s'%name,fontsize=5)
    i+=1
fig6.tight_layout()
fig6.savefig(outfilepath+'Figure6.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig6.savefig(outfilepath+'Figure6.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
logOpen.close()
# <codecell>
| gpl-2.0 |
sniemi/SamPy | resolve/misc/findslits.py | 1 | 25091 | '''
Fits slit image to a direct image to find x and y positions.
Currently the script uses a slit image. In the future however
we probably want to use the spectrum itself because there is
no guarantee that a slit confirmation image has always been
taken.
:todo: try to do the minimalization with scipy.optmize or
some other technique which might be faster and/or more
robust.
:requires: PyFITS
:requires: NumPy
:requires: matplotlib
:requires: SciPy
:author: Sami-Matias Niemi
'''
import matplotlib
#matplotlib.rc('text', usetex=True)
#matplotlib.rcParams['font.size'] = 17
import sys
from optparse import OptionParser
import pyfits as PF
import pylab as P
import numpy as np
import matplotlib.patches as patches
from matplotlib import cm
import scipy.optimize as optimize
import scipy.ndimage.interpolation as interpolation
#from SamPy
import SamPy.smnIO.write
import SamPy.smnIO.read
import SamPy.image.manipulation as m
import scipy.ndimage.filters as f
class FindSlitPosition():
    '''
    Fits slit-image data to a direct image to find the x/y offset and
    rotation that best align the two.

    Attributes such as slitImage, img, slits and result are attached
    later by runAll and the helper methods rather than in the
    constructor.
    '''

    def __init__(self):
        # All state is set up later in runAll.
        pass
    def _findSlitPositions(self, slitImage, threshold=1000):
        '''
        Finds slit positions from a slit image.

        This method uses the Sobel filter in scipy.ndimage.filters.

        :todo: this is not ready!

        :param: slitImage: 2D slit image array
        :param: threshold: minimum Sobel response counted as a slit edge
        '''
        # sobel filter along axis 1
        filtered = f.sobel(slitImage, axis=1)
        # create a mask above the threshold
        msk = filtered > threshold
        masked = filtered[msk]
        # indices
        y, x = np.indices(slitImage.shape)
        yargs = y[msk]
        # NOTE(review): yargs is computed but never used, and the return
        # value holds filtered pixel *values* rather than positions --
        # the method is unfinished (see the :todo: above).
        return masked
    def slitPosition(self, input, xy):
        '''
        Find slit positions from a confirmation image.

        The script assumes that slits are illuminated and that
        background gives at least 10 per cent increase for the
        slit data.

        :note: modper factor is used to modify the mean background to autolocate the slits.
        :note: the parameter name `input` shadows the builtin.

        :param: input: input data
        :param: xy: x and y minimum and maximum position to identify a single slit
        :rtype: dictionary
        '''
        # size modifier: shrink the detected box by one pixel per side
        sizemod = 1
        # modifier: pixels brighter than modper * mean are treated as slit
        modper = 1.1
        # shape of the input array
        shape = input.shape
        # check indices, rows and columns
        row, col = np.indices(shape)
        # create an intial mask
        # msk = input > val
        # Background level estimated from a fixed central window
        # (NOTE(review): hard-coded region; assumes a detector of at
        # least 3000x3000 pixels -- confirm for other instruments).
        mn = (np.mean(input[2000:3000, 2000:3000]))
        msk = input > (mn * modper)
        # NOTE(review): rm holds *column* (x) indices and cm *row* (y)
        # indices -- the names are swapped relative to their content.
        rm = col[msk]
        cm = row[msk]
        # mask the appropriate slit: keep only pixels inside the xy window
        msk = ((rm > xy['xmin']) & (rm < xy['xmax'])) & ((cm < xy['ymax']) & (cm > xy['ymin']))
        row = rm[msk]
        col = cm[msk]
        # check the ends, shrinking by sizemod on each side
        minrow = np.min(row) + sizemod
        maxrow = np.max(row) - sizemod
        mincol = np.min(col) + sizemod
        maxcol = np.max(col) - sizemod
        # get the width and height of the slit image
        xymins = (minrow, mincol)
        height = maxcol - mincol
        width = maxrow - minrow
        out = {}
        out['xy'] = xymins
        out['width'] = width
        out['height'] = height
        out['ymin'] = mincol
        out['ymax'] = maxcol
        out['xmin'] = minrow
        out['xmax'] = maxrow
        # pixel data cut out of the input image for this slit
        out['values'] = input[mincol:maxcol + 1, minrow:maxrow + 1]
        out['shape'] = shape
        out['throughput'] = 1.0
        return out
    def readSlitPositions(self):
        '''
        Reads slit positions from a slitfile and slitdata from another file.

        This file should follow DS9 format, i.e.:
        box 1545 871 7 499 0
        box 1512 1522 7 614 0
        box 1482 2175 7 499 0

        :note: slit image positions, not the slit positions on the sky!

        Stores the list of slit dictionaries in self.slits.
        '''
        slits = []
        filedata = open(self.slitPos, 'r').readlines()
        for i, line in enumerate(filedata):
            out = {}
            tmp = line.split()
            # DS9 box: x-centre, y-centre, width, height
            out['width'] = int(tmp[3])
            out['height'] = int(tmp[4])
            out['ymid'] = int(tmp[2])
            out['xmid'] = int(tmp[1])
            # convert centre + size into pixel limits
            out['ymin'] = out['ymid'] - (out['height'] / 2)
            out['ymax'] = out['ymid'] + (out['height'] / 2)
            out['xmin'] = out['xmid'] - (out['width'] / 2) + 1
            out['xmax'] = out['xmid'] + (out['width'] / 2) + 1
            out['xy'] = (out['xmin'], out['ymin'])
            out['shape'] = self.slitImage.shape
            out['throughput'] = 1.0
            # cut the slit pixel data out of the slit image
            out['values'] = self.slitImage[out['ymin']:out['ymax'] + 1, out['xmin']:out['xmax'] + 1].copy()
            out['number'] = i
            out['pixels'] = len(out['values'].ravel())
            # name the slits by file order: first=low, third=up, rest=mid
            if i == 0:
                out['name'] = 'low'
            elif i == 2:
                out['name'] = 'up'
            else:
                out['name'] = 'mid'
            slits.append(out)
        self.slits = slits
def generateSlitMask(self, slits, throughput=False):
'''
This function can be used to generate a slit mask from given slits.
'''
if len(set([x['shape'] for x in slits])) > 1:
print 'Shape of the slits do not match'
#slitmask
slitmask = np.zeros(slits[0]['shape'])
for slit in slits:
if throughput:
val = slit['throughput']
else:
val = 1.0
slitmask[slit['ymin']:slit['ymax'] + 1,
slit['xmin']:slit['xmax'] + 1] = val
return slitmask
def generateSkyMask(self, slits, offsetx=0, offsety=0):
'''
This function can be used to generate a slit mask on the sky
'''
skymask = np.zeros(slits[0]['shape'])
for slit in slits:
skymask[slit['ymin'] + offsety:slit['ymax'] + 1 + offsety,
slit['xmin'] + offsetx:slit['xmax'] + 1 + offsetx] = 1
return skymask
    def generateSlitImages(self, output, type='.pdf'):
        '''
        Generates diagnostic plots from slit image.

        :param: output: output file name prefix
        :param: type: output file extension
        :note: the parameter name `type` shadows the builtin.
        '''
        rotText = 40
        # generate a separate image of the slit data of each slit image.
        for i, slit in enumerate(self.slits):
            fig = P.figure()
            ax = fig.add_subplot(111)
            # take log10 from the slit data (non-positive pixels are set
            # to 1 so the logarithm is defined)
            tmp = slit['values'] * slit['throughput']
            tmp[tmp <= 0.0] = 1
            tmp = np.log10(tmp)
            ax.imshow(tmp,
                      origin='lower', interpolation=None)
            # rotate x axis labels
            for tl in ax.get_xticklabels():
                tl.set_rotation(rotText)
            P.savefig(output + str(i + 1) + type)
            P.close()
        # make a single image with all slits side by side
        fig = P.figure()
        for i, slit in enumerate(self.slits):
            ax = fig.add_subplot(1, len(self.slits), i + 1)
            # take log10 from the slit data
            tmp = slit['values'] * slit['throughput']
            tmp[tmp <= 0.0] = 1
            tmp = np.log10(tmp)
            ax.imshow(tmp,
                      origin='lower', interpolation=None)
            # rotate x axis labels
            for tl in ax.get_xticklabels():
                tl.set_rotation(rotText)
            # annotate each panel with the slit number
            ax.annotate('slit' + str(i + 1), xy=(0.5, 1.05),
                        xycoords='axes fraction', ha='center', va='center')
        P.savefig(output + 'All' + type)
        P.close()
    def overPlotSlits(self, output, type='.pdf', logscale=True):
        '''
        Overplot the slits to image data. Will overplot both the original slit
        positions and the best fitted position. Will also plot residuals.

        :param: output, output file name
        :param: type, output file extension
        :param: logscale, whether a log10 should be taken from the image data
        '''
        # make a copy of the imdata, in case we modify it...
        img = self.img.copy()
        fig = P.figure()
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        # show image; clip negatives so the optional log10 is defined
        img[img < 0] = 0
        if logscale:
            img[img > 0] = np.log10(img[img > 0])
        ax1.imshow(img, origin='lower', interpolation=None)
        # original slit sky positions (black rectangles)
        for slit in self.slits:
            ax1.add_patch(patches.Rectangle(slit['xySky'],
                                            slit['width'],
                                            slit['height'],
                                            fill=False))
        # fitted slit positions: the grid row with the minimum chi**2
        tmp = self.result['output'][np.argmin(self.result['output'][:, 3]), :]
        rot = tmp[0]
        x = tmp[1]
        y = tmp[2]
        for slit in self.slits:
            tmp = (slit['xySky'][0] + x, slit['xySky'][1] + y)
            patch = patches.Rectangle(tmp,
                                      slit['width'],
                                      slit['height'],
                                      fill=False,
                                      ec='red')
            # apply the fitted rotation to the patch
            t2 = matplotlib.transforms.Affine2D().rotate_deg(rot) + ax1.transData
            patch.set_transform(t2)
            ax1.add_patch(patch)
        # rotate x axis labels
        for tl in ax1.get_xticklabels():
            tl.set_rotation(40)
        # rotate x axis labels
        for tl in ax2.get_xticklabels():
            tl.set_rotation(40)
        # plot residuals: slit values divided by the best-fit image data
        z = np.ones(img.shape)
        for i, slit in enumerate(self.slits):
            y1 = slit['yminSky'] + y
            y2 = slit['ymaxSky'] + y + 1
            x1 = slit['xmin'] + x
            x2 = slit['xmax'] + x + 1
            z[y1:y2, x1:x2] = slit['values'] / self.result['chiMinData'][i]
        i2 = ax2.imshow(z, origin='lower', interpolation=None,
                        cmap=cm.get_cmap('binary'), vmin=0.795, vmax=1.205)
        c2 = fig.colorbar(i2, ax=ax2, shrink=0.7, fraction=0.05)
        c2.set_label('Slit Values / Direct Image Data')
        # annotate
        ax1.annotate('Fitted Position', xy=(0.5, 1.05),
                     xycoords='axes fraction', ha='center', va='center')
        ax2.annotate('Residuals', xy=(0.5, 1.05),
                     xycoords='axes fraction', ha='center', va='center')
        # save the first image
        P.savefig(output + type)
        # zoom-in version around the slits
        ymin = np.min(np.asarray([x['yminSky'] for x in self.slits]))
        ymax = np.max(np.asarray([x['ymaxSky'] for x in self.slits]))
        xmin = np.min(np.asarray([x['xmin'] for x in self.slits]))
        xmax = np.max(np.asarray([x['xmax'] for x in self.slits]))
        ax1.set_xlim(xmin - 200, xmax + 200)
        ax2.set_xlim(xmin - 200, xmax + 200)
        ax1.set_ylim(ymin - 100, ymax + 100)
        ax2.set_ylim(ymin - 100, ymax + 100)
        P.savefig(output + 'Zoomed' + type)
        P.close()
        del img
    def writeDS9RegionFile(self, output='skyslits.reg'):
        '''
        Writes a DS9 region file for all the slits.
        Draws a rectangle around each slit.

        :param: output: name of the region file to write
        '''
        fh = open(output, 'w')
        for slit in self.slits:
            # DS9 box format is x, y, width, height, but x and y are the centre point
            string = 'box %i %i %i %i 0\n' % (slit['xySky'][0] + slit['width'] / 2,
                                              slit['xySky'][1] + slit['height'] / 2,
                                              slit['width'],
                                              slit['height'])
            fh.write(string)
        fh.close()
def approxSkyPosition(self, lw=553, up=553, lwthr=0.9, upthr=0.9):
'''
Generates an approximated sky position for slits.
Assumes that both slits are shifted 553 pixels in y direction.
Such an assumption is crude, but should allow a starting point.
:note: this functions modifies the slit throughput
:todo: one should refine this after measuring the sky position accurately.
:param: lw, pixels to shift the lower slit
:param: up, pixels to shift the upper slit
:param: lwthr, 1/lwthr will be the throughput modifier for the lower slit
:param: upthr, 1/upthr will be the throughput modifier for the upper slit
'''
for i, slit in enumerate(self.slits):
if slit['name'] == 'up':
mod = - up
thr = upthr
elif slit['name'] == 'low':
mod = lw
thr = lwthr
else:
mod = 0
thr = 1.0
self.slits[i]['yminSky'] = slit['ymin'] + mod
self.slits[i]['ymaxSky'] = slit['ymax'] + mod
self.slits[i]['xySky'] = (slit['xy'][0], slit['xy'][1] + mod)
self.slits[i]['throughput'] = 1. / thr
def chiSquare(self, model, obs):
'''
Simple chi**2 calculation
'''
r = np.sum((obs - model) ** 2 / model)
return r
    def _fitfunct(self, x, y, directimage, slits):
        '''
        Model function for the least-squares fit: the direct-image pixel
        values under the three slits shifted by (x, y), peak-normalised
        and flattened into a single vector.
        '''
        # get data from direct image
        dirdata = []
        for slit in slits:
            d = directimage[slit['ymin'] + int(y):slit['ymax'] + 1 + int(y),
                            slit['xmin'] + int(x):slit['xmax'] + 1 + int(x)]
            dirdata.append(d)
        # exactly three slits are assumed here
        obs = np.hstack((dirdata[0].ravel(),
                         dirdata[1].ravel(),
                         dirdata[2].ravel()))
        obs /= np.max(obs)
        return obs

    def _errorf(self, params, directimage, slits, data):
        '''Residual vector for scipy.optimize.leastsq: model - data.'''
        return self._fitfunct(params[0], params[1], directimage, slits) - data
    def fitSlitsToDirectImageLSQ(self, slits, directimage, params=[-1, -1]):
        '''
        Fits the slits to the direct image using scipy.optimize.leastsq.

        :note: This does not really work at the mo...
        :note: the mutable default for `params` is shared between calls.

        :param: slits: sequence of three slit dictionaries
        :param: directimage: direct image data to fit against
        :param: params: initial (x, y) shift guess
        :return: full leastsq output tuple
        '''
        # generates a model array from the slit values, takes into account potential
        # throughput of a slit
        data = np.hstack((slits[0]['values'].ravel() * slits[0]['throughput'],
                          slits[1]['values'].ravel() * slits[1]['throughput'],
                          slits[2]['values'].ravel() * slits[2]['throughput']))
        data /= np.max(data)
        p = optimize.leastsq(self._errorf,
                             params,
                             args=(directimage, slits, data),
                             full_output=True,
                             ftol=1e-18,
                             xtol=1e-18)
        return p
    def fitSlitsToDirectImage(self,
                              xran=50, yran=50, step=1,
                              rot=1.0, rotstep=0.1, rotation=True,
                              normalize=False, debug=True):
        '''
        Fits a slit image to a direct image.
        This functions does not collapse the slit image, but uses each pixel.
        By default the counts are normalized to a peak count, but this can
        be controlled using the optional keyword normalize.

        :param: xran, +/- x-range to cover
        :param: yran, +/- y-range to cover
        :param: step, size of pixel steps in x and y
        :param: rot, +/- rotation angle in degrees
        :param: rotstep, step in degrees
        :param: rotation, whether rotations are searched at all
        :param: normalize, whether slit and direct image values should be normalized or not
        :param: debug, print debugging information

        Stores the result dictionary in self.result.
        '''
        # generates a model array from the slit values, takes into account potential
        # throughput of a slit (exactly three slits are assumed)
        self.model = np.hstack((self.slits[0]['values'].ravel() * self.slits[0]['throughput'],
                                self.slits[1]['values'].ravel() * self.slits[1]['throughput'],
                                self.slits[2]['values'].ravel() * self.slits[2]['throughput']))
        #self.msk = self.model > 0.0
        #self.model = self.model[self.msk]
        if normalize:
            self.model /= np.max(self.model)
        norm = len(self.model)
        # generate rotations
        if rotation:
            rotations = np.arange(-rot, rot, rotstep)
            # snap tiny float values to exactly zero so the unrotated
            # image is reused rather than interpolated
            rotations[(rotations < 1e-8) & (rotations > -1e-8)] = 0.0
            # make a copy of the direct image
            origimage = self.img.copy()
        else:
            rotations = [0, ]
        out = []
        # chmin is replaced by the slit cut-outs of the best grid point;
        # NOTE(review): the float initialiser -9.99 is only a sentinel
        # and is inconsistent with the list assigned below.
        chmin = -9.99
        cm = 1e50
        # loop over a range of rotations, x and y positions around the nominal position and record x, y and chisquare
        for r in rotations:
            if rotation:
                if r != 0.0:
                    d = interpolation.rotate(origimage, r, reshape=False)
                else:
                    d = origimage.copy()
            else:
                d = self.img.copy()
            for x in range(-xran, xran, step):
                for y in range(-yran, yran, step):
                    dirdata = []
                    # get data from direct image under each shifted slit
                    for s in self.slits:
                        data = d[s['yminSky'] + y:s['ymaxSky'] + 1 + y, s['xmin'] + x:s['xmax'] + 1 + x]
                        dirdata.append(data)
                    obs = np.hstack((dirdata[0].ravel(),
                                     dirdata[1].ravel(),
                                     dirdata[2].ravel()))
                    #obs = obs[self.msk]
                    if normalize:
                        obs /= np.max(obs)
                    chisq = self.chiSquare(self.model, obs)
                    out.append([r, x, y, chisq, chisq / norm])
                    # save the dirdata of the minimum chisqr
                    if chisq < cm:
                        chmin = dirdata
                        cm = chisq
                    if debug:
                        print r, x, y, chisq, chisq / norm
        # collect the search results into a dictionary
        r = {}
        r['rot'] = rot
        r['rotation_step'] = rotstep
        r['xran'] = xran
        r['yran'] = yran
        r['model'] = self.model
        r['output'] = np.array(out)
        r['chiMinData'] = chmin
        self.result = r
def fakeSlitData(self):
'''
Cuts out imaging data to test the fitting algorithm.
'''
for slit in self.slits:
slit['values'] = self.slitImage[slit['ymin']:slit['ymax'] + 1, slit['xmin']:slit['xmax'] + 1]
    def plotMinimalization(self, output='minim', type='.png'):
        '''
        Generates a two dimensional map of the minimalization.

        :param: output: output file name prefix
        :param: type: output file extension
        :note: the parameter name `type` shadows the builtin.
        '''
        # result columns: rotation, x, y, chi**2, chi**2/norm
        d = self.result['output']
        # first figure: chi**2 map over the x/y search grid
        P.figure()
        P.scatter(d[:, 1],
                  d[:, 2],
                  c=1. / np.log10(d[:, 3]),
                  s=20,
                  cmap=cm.get_cmap('jet'),
                  edgecolor='none',
                  alpha=0.5)
        P.xlim(-self.result['xran'], self.result['xran'])
        P.ylim(-self.result['yran'], self.result['yran'])
        P.xlabel('X [pixels]')
        P.ylabel('Y [pixels]')
        P.savefig(output + 'Map' + type)
        P.close()
        # second figure: chi**2 as a function of rotation
        P.figure()
        P.scatter(d[:, 0], d[:, 3], s=2)
        P.xlim(-self.result['rot'], self.result['rot'])
        P.ylim(0.9 * np.min(d[:, 3]), 1.05 * np.max(d[:, 3]))
        P.xlabel('Rotation [degrees]')
        P.ylabel('$\chi^{2}$')
        P.savefig(output + 'Rot' + type)
        P.close()
        # third figure: chi**2 as a function of x
        P.figure()
        P.scatter(d[:, 1], d[:, 3], s=2)
        P.xlim(-self.result['xran'], self.result['xran'])
        P.ylim(0.9 * np.min(d[:, 3]), 1.05 * np.max(d[:, 3]))
        P.xlabel('X [pixels]')
        P.ylabel('$\chi^{2}$')
        P.savefig(output + 'XCut' + type)
        P.close()
        # fourth figure: chi**2 as a function of y
        P.figure()
        P.scatter(d[:, 2], d[:, 3], s=2)
        P.xlim(-self.result['yran'], self.result['yran'])
        P.ylim(0.9 * np.min(d[:, 3]), 1.05 * np.max(d[:, 3]))
        P.xlabel('Y [pixels]')
        P.ylabel('$\chi^{2}$')
        P.savefig(output + 'YCut' + type)
        P.close()
    def outputMinima(self, output='min.txt', stdout=True):
        '''
        Outputs the results to a file and also the screen if stdout = True.

        :note: the local name `str` shadows the builtin within this method.

        :param: output: output file name (appended to, so repeated runs accumulate)
        :param: stdout: whether to also print the result line to the screen
        '''
        # the grid row with the minimum chi**2
        tmp = self.result['output'][np.argmin(self.result['output'][:, 3]), :]
        str = '{0:>s}\t{1:>s}\t{2:>s}\t{3:.1f}\t{4:.0f}\t{5:.0f}\t{6:.1f}'.format(self.fitImage,
                                                                                  self.slit,
                                                                                  self.slitPos,
                                                                                  tmp[0],
                                                                                  tmp[1],
                                                                                  tmp[2],
                                                                                  tmp[3])
        # to screen
        if stdout:
            print '\n\ndirect image slit image slit pos \t\t rot \t x \t y \t chi**2'
            print str
        # to file
        fh = open(output, 'a')
        fh.write(str + '\n')
        fh.close()
def runAll(self, opts, args):
'''
Driver function
'''
if (opts.slit is None or opts.fitImage is None):
processArgs(True)
sys.exit(1)
#rename the command line options
self.slit = opts.slit
self.fitImage = opts.fitImage
#slit position file defaults to slit.reg
if (opts.position is None):
self.slitPos = 'slit.reg'
print 'Using {0:>s} for slit positions'.format(slitPos)
else:
self.slitPos = opts.position
#debugging mode, where slit data are being faked
debug = opts.debug
#whether the data should be blurred or not
blur = opts.blur
#boolean to control whether the slit positions should be found automatically or not
automatic = opts.automatic
#load images
img = PF.open(self.fitImage, ignore_missing_end=True)[0].data
if img.shape[0] == 1:
img = img[0]
slitimage = PF.open(self.slit, ignore_missing_end=True)[0].data
if slitimage.shape[0] == 1:
slitimage = slitimage[0]
if blur:
img = m.blurImage(img, 4)
self.slitImage = slitimage
self.img = img
if automatic:
#gets the slit positions automatically, does not work perfectly
upslit = self.slitPosition(slitimage, {'xmin': 1460, 'xmax': 1500, 'ymin': 1900, 'ymax': 2500})
midslit = self.slitPosition(slitimage, {'xmin': 1500, 'xmax': 1525, 'ymin': 1200, 'ymax': 1850})
lowslit = self.slitPosition(slitimage, {'xmin': 1530, 'xmax': 1550, 'ymin': 600, 'ymax': 1130})
self.slits = (upslit, midslit, lowslit)
else:
self.readSlitPositions()
self.generateSlitImages('slits')
self.approxSkyPosition()
#self.approxSkyPosition(lw=553, up=553)
self.writeDS9RegionFile()
if debug:
self.fakeSlitData()
#a = fitSlitsToDirectImageLSQ(slits, img)
#print a
#import sys; sys.exit()
#find the chisqr minimum and make a diagnostic plot
#self.fitSlitsToDirectImage(xran=100, yran=100, rotation=False)
self.fitSlitsToDirectImage(xran=100, yran=100, rot=3.0, rotstep=0.1)
self.plotMinimalization()
#output some info
self.outputMinima()
#generates diagnostic plots and writes the slit positions for DS9 inspection
self.overPlotSlits('overplottedOriginalsLog')
def processArgs(just_print_help=False):
    '''
    Defines and parses the command line options.

    Returns the (options, arguments) pair from OptionParser, or None
    after printing the usage message when just_print_help is True.
    '''
    parser = OptionParser()
    parser.add_option("-s", "--slit", dest="slit", metavar="string",
                      help="Name of the slit image file")
    parser.add_option("-f", "--fitting", dest="fitImage", metavar='string',
                      help='Name of the direct image to which the slit data will be fitted')
    parser.add_option("-d", "--debug", action='store_true', dest="debug",
                      help='Debugging mode on')
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      help="Verbose mode on")
    parser.add_option("-p", "--position", dest="position", metavar="string",
                      help="Name of the slit position file")
    parser.add_option("-b", "--blur", action="store_true", dest="blur",
                      help="Whether the input direct image should be gaussian blurred or not")
    parser.add_option("-a", "--automatic", action="store_true", dest="automatic",
                      help="If on tries to determine slit positions automatically from the slit image")
    if not just_print_help:
        return parser.parse_args()
    parser.print_help()
if __name__ == '__main__':
    # Entry point: parse the command line options and run the whole
    # slit-fitting pipeline.
    find = FindSlitPosition()
    find.runAll(*processArgs())
jdmcbr/blaze | blaze/server/tests/test_server.py | 3 | 13833 | from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
from base64 import b64encode
import datashape
import numpy as np
from datetime import datetime
from pandas import DataFrame
from toolz import pipe
from odo import odo
from blaze.utils import example
from blaze import discover, symbol, by, CSV, compute, join, into, resource
from blaze.server.client import mimetype
from blaze.server.server import Server, to_tree, from_tree
from blaze.server.serialization import all_formats
# In-memory test datasets served by the Blaze server fixtures below.
accounts = DataFrame([['Alice', 100], ['Bob', 200]],
                     columns=['name', 'amount'])
cities = DataFrame([['Alice', 'NYC'], ['Bob', 'LA']],
                   columns=['name', 'city'])
events = DataFrame([[1, datetime(2000, 1, 1, 12, 0, 0)],
                    [2, datetime(2000, 1, 2, 12, 0, 0)]],
                   columns=['value', 'when'])
# sqlite-backed dataset built from the packaged iris example database.
db = resource('sqlite:///' + example('iris.db'))
data = {'accounts': accounts,
        'cities': cities,
        'events': events,
        'db': db}
@pytest.fixture(scope='module')
def server():
    # Blaze server exposing the test datasets with every serialization
    # format enabled.
    s = Server(data, all_formats)
    s.app.testing = True
    return s


@pytest.yield_fixture
def test(server):
    # Flask test client bound to the module-scoped server.
    with server.app.test_client() as c:
        yield c
def test_datasets(test):
    # /datashape should describe every registered dataset.
    response = test.get('/datashape')
    assert response.data.decode('utf-8') == str(discover(data))


@pytest.mark.parametrize('serial', all_formats)
def test_bad_responses(test, serial):
    # Malformed payloads, unknown datasets and empty bodies must not 200.
    assert 'OK' not in test.post(
        '/compute/accounts.{name}'.format(name=serial.name),
        data=serial.dumps(500),
    ).status
    assert 'OK' not in test.post(
        '/compute/non-existent-table.{name}'.format(name=serial.name),
        data=serial.dumps(0),
    ).status
    assert 'OK' not in test.post(
        '/compute/accounts.{name}'.format(name=serial.name),
    ).status


def test_to_from_json():
    # Expression trees survive a to_tree/from_tree round trip.
    t = symbol('t', 'var * {name: string, amount: int}')
    assert from_tree(to_tree(t)).isidentical(t)
    assert from_tree(to_tree(t.amount + 1)).isidentical(t.amount + 1)
def test_to_tree():
    # to_tree serializes an expression to a nested op/args dictionary.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t.amount.sum()
    expected = {'op': 'sum',
                'args': [{'op': 'Field',
                          'args': [{'op': 'Symbol',
                                    'args': ['t',
                                             'var * {name: string, amount: int32}',
                                             None]},
                                   'amount']},
                         [0], False]}
    assert to_tree(expr) == expected


@pytest.mark.parametrize('serial', all_formats)
def test_to_tree_slice(serial):
    # Slice expressions round trip through every serialization format.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t[:5]
    expr2 = pipe(expr, to_tree, serial.dumps, serial.loads, from_tree)
    assert expr.isidentical(expr2)


def test_to_from_tree_namespace():
    # Symbols named via the names mapping are referenced, not inlined.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t.name
    tree = to_tree(expr, names={t: 't'})
    assert tree == {'op': 'Field', 'args': ['t', 'name']}
    new = from_tree(tree, namespace={'t': t})
    assert new.isidentical(expr)


def test_from_tree_is_robust_to_unnecessary_namespace():
    # A namespace entry that the tree never references is harmless.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t.amount + 1
    tree = to_tree(expr)  # don't use namespace
    assert from_tree(tree, {'t': t}).isidentical(expr)
# Symbol covering the whole server dataset; shared by the tests below.
t = symbol('t', discover(data))


@pytest.mark.parametrize('serial', all_formats)
def test_compute(test, serial):
    # A reduction computed through the /compute endpoint.
    expr = t.accounts.amount.sum()
    query = {'expr': to_tree(expr)}
    expected = 300
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    # NOTE(review): the local name 'data' shadows the module-level dataset dict.
    data = serial.loads(response.data)
    assert data['data'] == expected
    assert data['names'] == ['amount_sum']


@pytest.mark.parametrize('serial', all_formats)
def test_get_datetimes(test, serial):
    # Datetime values survive serialization and reconstruct via the dshape.
    expr = t.events
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    data = serial.loads(response.data)
    ds = datashape.dshape(data['datashape'])
    result = into(np.ndarray, data['data'], dshape=ds)
    assert into(list, result) == into(list, events)
    assert data['names'] == events.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def dont_test_compute_with_namespace(test, serial):
    # NOTE(review): the "dont_" prefix keeps pytest from collecting this
    # test -- presumably disabled on purpose; confirm before re-enabling.
    query = {'expr': {'op': 'Field',
                      'args': ['accounts', 'name']}}
    expected = ['Alice', 'Bob']
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    data = serial.loads(response.data)
    assert data['data'] == expected
    assert data['names'] == ['name']


@pytest.yield_fixture
def iris_server():
    # Server backed by the example iris CSV, with a test client attached.
    iris = CSV(example('iris.csv'))
    s = Server(iris, all_formats)
    s.app.testing = True
    with s.app.test_client() as c:
        yield c


# Module-level handle on the same CSV, used to compute expected values.
iris = CSV(example('iris.csv'))
@pytest.mark.parametrize('serial', all_formats)
def test_compute_with_variable_in_namespace(iris_server, serial):
    # A free variable (pl) supplied through the request namespace.
    test = iris_server
    t = symbol('t', discover(iris))
    pl = symbol('pl', 'float32')
    expr = t[t.petal_length > pl].species
    tree = to_tree(expr, {pl: 'pl'})
    blob = serial.dumps({'expr': tree, 'namespace': {'pl': 5}})
    resp = test.post(
        '/compute',
        data=blob,
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    data = serial.loads(resp.data)
    result = data['data']
    expected = list(compute(expr._subs({pl: 5}), {t: iris}))
    assert result == expected
    assert data['names'] == ['species']


@pytest.mark.parametrize('serial', all_formats)
def test_compute_by_with_summary(iris_server, serial):
    # Grouped aggregation with multiple summary fields.
    test = iris_server
    t = symbol('t', discover(iris))
    expr = by(
        t.species,
        max=t.petal_length.max(),
        sum=t.petal_width.sum(),
    )
    tree = to_tree(expr)
    blob = serial.dumps({'expr': tree})
    resp = test.post(
        '/compute',
        data=blob,
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    data = serial.loads(resp.data)
    result = DataFrame(data['data']).values
    expected = compute(expr, iris).values
    # species column compares exactly; numeric summaries approximately.
    np.testing.assert_array_equal(result[:, 0], expected[:, 0])
    np.testing.assert_array_almost_equal(result[:, 1:], expected[:, 1:])
    assert data['names'] == ['species', 'max', 'sum']


@pytest.mark.parametrize('serial', all_formats)
def test_compute_column_wise(iris_server, serial):
    # Row selection from a boolean combination of column expressions.
    test = iris_server
    t = symbol('t', discover(iris))
    subexpr = ((t.petal_width / 2 > 0.5) &
               (t.petal_length / 2 > 0.5))
    expr = t[subexpr]
    tree = to_tree(expr)
    blob = serial.dumps({'expr': tree})
    resp = test.post(
        '/compute',
        data=blob,
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    data = serial.loads(resp.data)
    result = data['data']
    expected = compute(expr, iris)
    assert list(map(tuple, result)) == into(list, expected)
    assert data['names'] == t.fields
@pytest.mark.parametrize('serial', all_formats)
def test_multi_expression_compute(test, serial):
    # Join across two of the server's datasets.
    s = symbol('s', discover(data))
    expr = join(s.accounts, s.cities)
    resp = test.post(
        '/compute',
        data=serial.dumps(dict(expr=to_tree(expr))),
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    respdata = serial.loads(resp.data)
    result = respdata['data']
    expected = compute(expr, {s: data})
    assert list(map(tuple, result)) == into(list, expected)
    assert respdata['names'] == expr.fields


@pytest.mark.parametrize('serial', all_formats)
def test_leaf_symbol(test, serial):
    # ':leaf' in a raw tree refers to the server's root dataset.
    query = {'expr': {'op': 'Field', 'args': [':leaf', 'cities']}}
    resp = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    data = serial.loads(resp.data)
    a = data['data']
    b = into(list, cities)
    assert list(map(tuple, a)) == b
    assert data['names'] == cities.columns.tolist()


@pytest.mark.parametrize('serial', all_formats)
def test_sqlalchemy_result(test, serial):
    # Results from the sqlite-backed dataset serialize to row sequences.
    expr = t.db.iris.head(5)
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    data = serial.loads(response.data)
    result = data['data']
    assert all(isinstance(item, (tuple, list)) for item in result)
    assert data['names'] == t.db.iris.fields
def test_server_accepts_non_nonzero_ables():
    # A dataset whose truth value is ambiguous (empty DataFrame) is fine.
    Server(DataFrame())


@pytest.mark.parametrize('serial', all_formats)
def test_server_can_compute_sqlalchemy_reductions(test, serial):
    # Reductions over the sqlite-backed dataset match local computation.
    expr = t.db.iris.petal_length.sum()
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = respdata['data']
    assert result == odo(compute(expr, {t: data}), int)
    assert respdata['names'] == ['petal_length_sum']


@pytest.mark.parametrize('serial', all_formats)
def test_serialization_endpoints(test, serial):
    # NOTE(review): identical body to the previous test; consider
    # dedicating it to format-specific assertions or removing it.
    expr = t.db.iris.petal_length.sum()
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = respdata['data']
    assert result == odo(compute(expr, {t: data}), int)
    assert respdata['names'] == ['petal_length_sum']


@pytest.fixture
def has_bokeh():
    # Skip CORS tests when bokeh (which provides crossdomain) is absent.
    try:
        from bokeh.server.crossdomain import crossdomain
    except ImportError as e:
        pytest.skip(str(e))
@pytest.mark.parametrize('serial', all_formats)
def test_cors_compute(test, serial, has_bokeh):
    # /compute must allow cross-origin POST (plus HEAD/OPTIONS preflight)
    # but not GET, since queries are always sent in the request body.
    expr = t.db.iris.petal_length.sum()
    res = test.post(
        '/compute',
        data=serial.dumps(dict(expr=to_tree(expr))),
        headers=mimetype(serial)
    )
    assert res.status_code == 200
    assert res.headers['Access-Control-Allow-Origin'] == '*'
    assert 'HEAD' in res.headers['Access-Control-Allow-Methods']
    assert 'OPTIONS' in res.headers['Access-Control-Allow-Methods']
    assert 'POST' in res.headers['Access-Control-Allow-Methods']
    # we don't allow gets because we're always sending data
    assert 'GET' not in res.headers['Access-Control-Allow-Methods']
@pytest.mark.parametrize('method',
                         ['get',
                          pytest.mark.xfail('head', raises=AssertionError),
                          pytest.mark.xfail('options', raises=AssertionError),
                          pytest.mark.xfail('post', raises=AssertionError)])
def test_cors_datashape(test, method, has_bokeh):
    # /datashape is read-only: only GET is CORS-allowed.  The non-GET
    # verbs are parametrized as expected failures of these assertions.
    res = getattr(test, method)('/datashape')
    assert res.status_code == 200
    assert res.headers['Access-Control-Allow-Origin'] == '*'
    assert 'HEAD' not in res.headers['Access-Control-Allow-Methods']
    assert 'OPTIONS' not in res.headers['Access-Control-Allow-Methods']
    assert 'POST' not in res.headers['Access-Control-Allow-Methods']
    # we only allow GET requests
    assert 'GET' in res.headers['Access-Control-Allow-Methods']
@pytest.fixture(scope='module')
def username():
    # Credentials accepted by the server_with_auth fixture below.
    return 'blaze-dev'


@pytest.fixture(scope='module')
def password():
    return 'SecretPassword123'
@pytest.fixture(scope='module')
def server_with_auth(username, password):
    # Server accepting exactly one username/password pair via HTTP Basic
    # auth; `a` is the parsed Authorization object (None when absent).
    def auth(a):
        return a and a.username == username and a.password == password

    s = Server(data, all_formats, authorization=auth)
    s.app.testing = True
    return s
@pytest.yield_fixture
def test_with_auth(server_with_auth):
    # Flask test client bound to the auth-protected server.
    with server_with_auth.app.test_client() as c:
        yield c
def basic_auth(username, password):
    """Build the value of an HTTP Basic ``Authorization`` header.

    Joins ``username`` and ``password`` with ``':'`` (RFC 7617 format),
    base64-encodes the UTF-8 bytes, and prefixes ``b'Basic '``.
    Returns ``bytes``.
    """
    credentials = '%s:%s' % (username, password)
    return b'Basic ' + b64encode(credentials.encode('utf-8'))
@pytest.mark.parametrize('serial', all_formats)
def test_auth(test_with_auth, username, password, serial):
    expr = t.accounts.amount.sum()
    query = {'expr': to_tree(expr)}

    # Correct credentials: both endpoints respond 200.
    r = test_with_auth.get(
        '/datashape',
        headers={'authorization': basic_auth(username, password)},
    )
    assert r.status_code == 200
    headers = mimetype(serial)
    headers['authorization'] = basic_auth(username, password)
    s = test_with_auth.post(
        '/compute',
        data=serial.dumps(query),
        headers=headers,
    )
    assert s.status_code == 200

    # Wrong credentials: both endpoints respond 401 Unauthorized.
    u = test_with_auth.get(
        '/datashape',
        headers={'authorization': basic_auth(username + 'a', password + 'a')},
    )
    assert u.status_code == 401
    headers['authorization'] = basic_auth(username + 'a', password + 'a')
    v = test_with_auth.post(
        '/compute',
        data=serial.dumps(query),
        headers=headers,
    )
    assert v.status_code == 401
@pytest.mark.parametrize('serial', all_formats)
def test_minute_query(test, serial):
    # Datetime accessor expressions (.minute) should round-trip through
    # the /compute endpoint with data, names and datashape intact.
    expr = t.events.when.minute
    query = {'expr': to_tree(expr)}
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(query)
    )
    expected = {
        'data': [0, 0],
        'names': ['when_minute'],
        'datashape': '2 * int64'
    }
    assert result.status_code == 200
    assert expected == serial.loads(result.data)
| bsd-3-clause |
poeticcapybara/pythalesians | pythalesians-examples/plotly_examples.py | 1 | 4222 | __author__ = 'saeedamen'
import datetime
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
from pythalesians.graphics.graphs.plotfactory import PlotFactory
from pythalesians.graphics.graphs.graphproperties import GraphProperties
if True:
    # Download daily closes for Apple and the S&P500 ETF from Google
    # Finance, rebase them to a common index, and plot with two adapters.
    time_series_request = TimeSeriesRequest(
        start_date="01 Jan 2013",               # start date
        finish_date=datetime.date.today(),      # finish date
        freq='daily',                           # daily data
        data_source='google',                   # use Google as data source
        tickers=['Apple', 'S&P500 ETF'],        # ticker (Thalesians)
        fields=['close'],                       # which fields to download
        vendor_tickers=['aapl', 'spy'],         # ticker (Google)
        vendor_fields=['Close'],                # which Google fields to download
        cache_algo='internet_load_return')      # how to return data

    ltsf = LightTimeSeriesFactory()
    tsc = TimeSeriesCalcs()

    # Rebase both price series to a common multiplicative index.
    df = tsc.create_mult_index_from_prices(ltsf.harvest_time_series(time_series_request))

    gp = GraphProperties()
    gp.title = "S&P500 vs Apple"

    # plot first with PyThalesians and then Plotly (via Cufflinks)
    # just needs 1 word to change
    # (although, note that AdapterCufflinks does have some extra parameters that can be set in
    # GraphProperties)
    gp.plotly_username = 'thalesians'
    gp.plotly_world_readable = True

    pf = PlotFactory()
    pf.plot_generic_graph(df, type='line', adapter='pythalesians', gp=gp)
    pf.plot_generic_graph(df, type='line', adapter='cufflinks', gp=gp)
# test simple Plotly bar charts - average differences in EURUSDV1M-1Y vol and USDJPYV1M-1Y slope over past sixth months
if True:
    from datetime import timedelta

    ltsf = LightTimeSeriesFactory()

    end = datetime.datetime.utcnow()
    start = end - timedelta(days=180)

    tickers = ['EURUSDV1M', 'EURUSDV1Y', 'USDJPYV1M', 'USDJPYV1Y']
    vendor_tickers = ['EURUSDV1M BGN Curncy', 'EURUSDV1Y BGN Curncy', 'USDJPYV1M BGN Curncy', 'USDJPYV1Y BGN Curncy']

    time_series_request = TimeSeriesRequest(
        start_date=start,                       # start date
        finish_date=datetime.date.today(),      # finish date
        freq='daily',                           # daily data
        data_source='bloomberg',                # use Bloomberg as data source
        tickers=tickers,                        # ticker (Thalesians)
        fields=['close'],                       # which fields to download
        vendor_tickers=vendor_tickers,          # ticker (Bloomberg)
        vendor_fields=['PX_LAST'],              # which Bloomberg fields to download
        cache_algo='internet_load_return')      # how to return data

    df = ltsf.harvest_time_series(time_series_request)

    import pandas

    # Drop the '.close' suffix appended by the loader.
    df.columns = [x.replace('.close', '') for x in df.columns.values]

    short_dates = df[["EURUSDV1M", "USDJPYV1M"]]
    long_dates = df[["EURUSDV1Y", "USDJPYV1Y"]]

    # Align on dates so the 1M and 1Y series share the same index.
    short_dates, long_dates = short_dates.align(long_dates, join='left', axis=0)

    # Vol slope = short-dated implied vol minus long-dated implied vol.
    slope = pandas.DataFrame(data=short_dates.values - long_dates.values, index=short_dates.index,
                             columns=["EURUSDV1M-1Y", "USDJPYV1M-1Y"])

    # resample and calculate average over month
    slope_monthly = slope.resample('M', how='mean')
    slope_monthly.index = [str(x.year) + '/' + str(x.month) for x in slope_monthly.index]

    pf = PlotFactory()
    gp = GraphProperties()
    gp.source = 'Thalesians/BBG'
    gp.title = 'Vol slopes in EUR/USD and USD/JPY recently'
    gp.scale_factor = 2
    gp.display_legend = True
    gp.chart_type = 'bar'
    gp.x_title = 'Dates'
    gp.y_title = 'Pc'
pf.plot_bar_graph(slope_monthly, adapter = 'cufflinks', gp = gp) | apache-2.0 |
rneher/FitnessInference | toydata/figure_scripts/Fig3_polarizer.py | 1 | 6419 | import glob,sys
import numpy as np
sys.path.append('../../flu/src')
import test_flu_prediction as test_flu
import matplotlib.pyplot as plt
from matplotlib import cm
import analysis_utils_toy_data as AU
file_formats = ['.svg', '.pdf']
plt.rcParams.update(test_flu.mpl_params)
line_styles = ['-', '--', '-.']
cols = ['b', 'r', 'g', 'c', 'm', 'k', 'y']
cols+=cols
figure_folder = '../figures/'
data_dir = '../data_new'
prefix= '/20140820_'
N_list = [20000] #,20000]
mu_list = [1e-6,2e-6, 4e-6, 8e-6, 16e-6, 32e-6, 64e-6] #,128e-6]
#nflip_list = [0.02,0.04, 0.08, 0.16]
offset = 2
# multiply by 2 to transform to pairwise distance
m_list = 2.0**np.arange(-6,4, 1) * 0.5
m_to_plot = m_list[offset:-2]
nflip = 0.08 #, 0.16]
sdt_list = [1,100] #determines whether 2 genomes are sampled every generation, or 200 every 100 gen
pred, norm_pred, run_stats, corrcoef = AU.load_prediction_data(prefix, N_list, mu_list, [nflip],
sdt_list, return_mean=True, polarizer=True)
pred_I, norm_pred_I, run_stats_I = AU.load_prediction_data(prefix, N_list, mu_list, [nflip],
sdt_list, return_mean=True, polarizer=False)
D,gamma,omega = 0.2,1.0,0.3
valdt = 200
ssize = 200
L=2000
mean_fitness_true_fitness_spearman_i = -4
###################################################################
### correlation vs pairwise diversity
###################################################################
for sdt in [1,100]:
pred_label = (valdt,)
pred_label_I = ssize, gamma, D, omega, valdt
### PLOT FITNESS CORRELATION VS PAIRWISE DIVERSITY ###
plt.figure(figsize= (8,6))
#plt.title('sdt = '+str(sdt))
ax = plt.subplot(111)
for ni, N in enumerate(N_list):
for mi,m in enumerate(m_to_plot):
print [corrcoef[(N,mu,nflip,sdt)+pred_label][0][mi+offset] for mu in mu_list]
# if mi: label_str = None
# else: label_str='$n_A = '+str(nflip)+'$'
label_str=r'$\tau = '+str(m)+'$'
plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
[corrcoef[(N,mu,nflip,sdt)+pred_label][0][mi+offset] for mu in mu_list],
[corrcoef[(N,mu,nflip,sdt)+pred_label][2][mi+offset] for mu in mu_list],
c=cm.jet((mi+1.0)/(1+len(m_to_plot))), ls='-', label = label_str , lw=2)
plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
[pred_I[(N,mu,nflip,sdt)+pred_label_I][0][mean_fitness_true_fitness_spearman_i] for mu in mu_list],
[pred_I[(N,mu,nflip,sdt)+pred_label_I][2][mean_fitness_true_fitness_spearman_i] for mu in mu_list],
c='k', ls='--', label = 'Fitness inference', lw=2)
plt.ylabel("Spearman's correlation")
plt.xlabel('average pairwise distance')
plt.xscale('log')
plt.legend(loc=4)
#add panel label
#plt.text(0.02,0.9,'Fig.~1-S1', transform = plt.gca().transAxes, fontsize = 20)
plt.xlim([1.0, 100])
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig3_pairwise_diversity_vs_predictability_polarizer_sdt_'+str(sdt)+'_nflip_'+str(nflip)+'_valdt_'+str(valdt)+ff)
### PLOT prediction_success VS PAIRWISE DIVERSITY ###
plt.figure(figsize= (8,6))
ax = plt.subplot(111)
#plt.title(r'$dt='+str(sdt)+'$')
for ni, N in enumerate(N_list):
for mi,m in enumerate(m_to_plot):
# if mi: label_str = None
# else: label_str='$n_A = '+str(nflip)+'$'
label_str=r'$\tau = '+str(m)+'$'
plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
[norm_pred[(N,mu,nflip,sdt)+pred_label][0][2+mi] for mu in mu_list],
[norm_pred[(N,mu,nflip,sdt)+pred_label][2][2+mi] for mu in mu_list],
c=cm.jet((mi+1.0)/(1+len(m_to_plot))), ls='-', label =label_str ,lw=2)
plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
[norm_pred_I[(N,mu,nflip,sdt)+pred_label_I][0][1] for mu in mu_list],
[norm_pred_I[(N,mu,nflip,sdt)+pred_label_I][2][1] for mu in mu_list],
c='k', ls='--', label = 'Fitness inference', lw=2)
plt.ylabel(r'average distance $\Delta$ to future populations')
plt.xlabel('average pairwise distance')
#add panel label
plt.xscale('log')
plt.legend(loc=1)
plt.xlim([1.0, 100])
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig3_S1_pairwise_diversity_vs_distance_sdt_'+str(sdt)+'_nflip_'+str(nflip)+'_polarizer_valdt_'+str(valdt)+ff)
#plt.close()
###################################################################
### correlation vs pairwise diversity
###################################################################
## plot gamma versus the number of predictions that are worse than random
# reload the data without averaging over the different realizations.
pred, norm_pred, run_stats,corrcoeffs = AU.load_prediction_data(prefix, N_list, mu_list, [nflip],
sdt_list, return_mean=False, polarizer=True)
for sdt in [100]:
### PLOT FITNESS CORRELATION VS DSCALE ###
plt.figure(figsize= (10,6))
ax = plt.subplot(111)
#plt.title(r'$\omega='+str(omega)+',\;dt='+str(sdt)+'$')
for mi,mu in enumerate(mu_list):
for ni, N in enumerate(N_list):
label = (N,mu,nflip,sdt)+(valdt,)
if ni==0:
label_str = r'$u ='+str(mu*L)+'$'
else:
label_str = None
plt.plot(m_list, [np.mean(pred[label][:,0]<pred[label][:,memi+2])
for memi,m in enumerate(m_list)], lw=2, marker='o', markersize=10,
ls=line_styles[ni], c=cols[mi], label = label_str)
#plt.xscale('log')
#add panel label
plt.text(0.02,0.9,'Fig.~2-S3', transform = plt.gca().transAxes, fontsize = 20)
plt.xlim([0.01, 10.5])
plt.xscale('log')
plt.ylabel('worse than random (out of 100)')
plt.xlabel(r'time rescaling $\gamma$')
plt.legend(loc=1,numpoints=1)
for ff in file_formats:
plt.savefig(figure_folder+'Fig2_S3_gamma_vs_predictability_polarizer_sdt_'+str(sdt)+'_nflip_'+str(nflip)+'_w_'+str(omega)+'_valdt_'+str(valdt)+ff)
| mit |
xdnian/pyml | code/optional-py-scripts/ch10.py | 2 | 14374 | # Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 10 - Predicting Continuous Target Variables with Regression Analysis
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
from sklearn.cross_validation import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# Added version check for recent scikit-learn 0.18 checks
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
#############################################################################
print(50 * '=')
print('Section: Exploring the Housing dataset')
print(50 * '-')
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/'
'housing/housing.data',
header=None,
sep='\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
print('Dataset excerpt:\n\n', df.head())
#############################################################################
print(50 * '=')
print('Section: Visualizing the important characteristics of a dataset')
print(50 * '-')
sns.set(style='whitegrid', context='notebook')
cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols], size=2.5)
# plt.tight_layout()
# plt.savefig('./figures/scatter.png', dpi=300)
plt.show()
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.5)
hm = sns.heatmap(cm,
cbar=True,
annot=True,
square=True,
fmt='.2f',
annot_kws={'size': 15},
yticklabels=cols,
xticklabels=cols)
# plt.tight_layout()
# plt.savefig('./figures/corr_mat.png', dpi=300)
plt.show()
sns.reset_orig()
#############################################################################
print(50 * '=')
print('Section: Solving regression for regression'
' parameters with gradient descent')
print(50 * '-')
class LinearRegressionGD(object):
    """Linear regression fitted by batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate (step size applied to each weight update).
    n_iter : int
        Number of full passes (epochs) over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias/intercept term.
    cost_ : list
        Sum-of-squared-errors cost recorded at every epoch.
    """

    def __init__(self, eta=0.001, n_iter=20):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit on training matrix X (n_samples, n_features) and targets y."""
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []

        for _ in range(self.n_iter):
            residuals = y - self.net_input(X)
            # Batch update: gradient of the SSE cost w.r.t. weights/bias.
            self.w_[1:] += self.eta * X.T.dot(residuals)
            self.w_[0] += self.eta * residuals.sum()
            self.cost_.append((residuals ** 2).sum() / 2.0)
        return self

    def net_input(self, X):
        """Linear combination ``X @ w_[1:] + w_[0]``."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Predicted targets (identical to net_input for regression)."""
        return self.net_input(X)
X = df[['RM']].values
y = df['MEDV'].values
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
y_std = sc_y.fit_transform(y)
lr = LinearRegressionGD()
lr.fit(X_std, y_std)
plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
# plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
def lin_regplot(X, y, model):
    """Scatter the raw (X, y) data and overlay the model's fitted line."""
    plt.scatter(X, y, c='lightblue')
    plt.plot(X, model.predict(X), color='red', linewidth=2)
    return
lin_regplot(X_std, y_std, lr)
plt.xlabel('Average number of rooms [RM] (standardized)')
plt.ylabel('Price in $1000\'s [MEDV] (standardized)')
# plt.tight_layout()
# plt.savefig('./figures/gradient_fit.png', dpi=300)
plt.show()
print('Slope: %.3f' % lr.w_[1])
print('Intercept: %.3f' % lr.w_[0])

# StandardScaler.transform expects a 2-D array of shape
# (n_samples, n_features); the bare list [5.0] relied on deprecated 1-D
# input handling and raises on scikit-learn >= 0.18.
num_rooms_std = sc_x.transform([[5.0]])
price_std = lr.predict(num_rooms_std)
print("Price in $1000's: %.3f" % sc_y.inverse_transform(price_std))
#############################################################################
print(50 * '=')
print('Section: Estimating the coefficient of a'
' regression model via scikit-learn')
print(50 * '-')
slr = LinearRegression()
slr.fit(X, y)
y_pred = slr.predict(X)
print('Slope: %.3f' % slr.coef_[0])
print('Intercept: %.3f' % slr.intercept_)
lin_regplot(X, y, slr)
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
# plt.tight_layout()
# plt.savefig('./figures/scikit_lr_fit.png', dpi=300)
plt.show()
# adding a column vector of "ones"
Xb = np.hstack((np.ones((X.shape[0], 1)), X))
w = np.zeros(X.shape[1])
z = np.linalg.inv(np.dot(Xb.T, Xb))
w = np.dot(z, np.dot(Xb.T, y))
print('Slope: %.3f' % w[1])
print('Intercept: %.3f' % w[0])
#############################################################################
print(50 * '=')
print('Section: Fitting a robust regression model using RANSAC')
print(50 * '-')
if Version(sklearn_version) < '0.18':
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
residual_metric=lambda x: np.sum(np.abs(x), axis=1),
residual_threshold=5.0,
random_state=0)
else:
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
loss='absolute_loss',
residual_threshold=5.0,
random_state=0)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],
c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],
c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('./figures/ransac_fit.png', dpi=300)
plt.show()
print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_)
#############################################################################
print(50 * '=')
print('Section: Evaluating the performance of linear regression models')
print(50 * '-')
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0)
slr = LinearRegression()
slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)
plt.scatter(y_train_pred, y_train_pred - y_train,
c='blue', marker='o', label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test,
c='lightgreen', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
# plt.tight_layout()
# plt.savefig('./figures/slr_residuals.png', dpi=300)
plt.show()
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
#############################################################################
print(50 * '=')
print('Section: Using regularized methods for regression')
print(50 * '-')
print('LASSO Coefficients')
lasso = Lasso(alpha=0.1)
lasso.fit(X_train, y_train)
y_train_pred = lasso.predict(X_train)
y_test_pred = lasso.predict(X_test)
print(lasso.coef_)
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
#############################################################################
print(50 * '=')
print('Section: Turning a linear regression model into a curve'
' - polynomial regression')
print(50 * '-')
X = np.array([258.0, 270.0, 294.0,
320.0, 342.0, 368.0,
396.0, 446.0, 480.0, 586.0])[:, np.newaxis]
y = np.array([236.4, 234.4, 252.8,
298.6, 314.2, 342.2,
360.8, 368.0, 391.2,
390.8])
lr = LinearRegression()
pr = LinearRegression()
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)
# fit linear features
lr.fit(X, y)
X_fit = np.arange(250, 600, 10)[:, np.newaxis]
y_lin_fit = lr.predict(X_fit)
# fit quadratic features
pr.fit(X_quad, y)
y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))
# plot results
plt.scatter(X, y, label='training points')
plt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')
plt.plot(X_fit, y_quad_fit, label='quadratic fit')
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('./figures/poly_example.png', dpi=300)
plt.show()
y_lin_pred = lr.predict(X)
y_quad_pred = pr.predict(X_quad)
print('Training MSE linear: %.3f, quadratic: %.3f' % (
mean_squared_error(y, y_lin_pred),
mean_squared_error(y, y_quad_pred)))
print('Training R^2 linear: %.3f, quadratic: %.3f' % (
r2_score(y, y_lin_pred),
r2_score(y, y_quad_pred)))
#############################################################################
print(50 * '=')
print('Section: Modeling nonlinear relationships in the Housing Dataset')
print(50 * '-')
X = df[['LSTAT']].values
y = df['MEDV'].values
regr = LinearRegression()
# create quadratic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)
# fit features
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))
# plot results
plt.scatter(X, y, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label='linear (d=1), $R^2=%.2f$' % linear_r2,
color='blue',
lw=2,
linestyle=':')
plt.plot(X_fit, y_quad_fit,
label='quadratic (d=2), $R^2=%.2f$' % quadratic_r2,
color='red',
lw=2,
linestyle='-')
plt.plot(X_fit, y_cubic_fit,
label='cubic (d=3), $R^2=%.2f$' % cubic_r2,
color='green',
lw=2,
linestyle='--')
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper right')
# plt.tight_layout()
# plt.savefig('./figures/polyhouse_example.png', dpi=300)
plt.show()
print('Transforming the dataset')
X = df[['LSTAT']].values
y = df['MEDV'].values
# transform features
X_log = np.log(X)
y_sqrt = np.sqrt(y)
# fit features
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]
regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))
# plot results
plt.scatter(X_log, y_sqrt, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label='linear (d=1), $R^2=%.2f$' % linear_r2,
color='blue',
lw=2)
plt.xlabel('log(% lower status of the population [LSTAT])')
plt.ylabel('$\sqrt{Price \; in \; \$1000\'s [MEDV]}$')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./figures/transform_example.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Decision tree regression')
print(50 * '-')
X = df[['LSTAT']].values
y = df['MEDV'].values
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
sort_idx = X.flatten().argsort()
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
# plt.savefig('./figures/tree_regression.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Random forest regression')
print(50 * '-')
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=1)
forest = RandomForestRegressor(n_estimators=1000,
criterion='mse',
random_state=1,
n_jobs=-1)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
plt.scatter(y_train_pred,
y_train_pred - y_train,
c='black',
marker='o',
s=35,
alpha=0.5,
label='Training data')
plt.scatter(y_test_pred,
y_test_pred - y_test,
c='lightgreen',
marker='s',
s=35,
alpha=0.7,
label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
# plt.tight_layout()
# plt.savefig('./figures/slr_residuals.png', dpi=300)
plt.show()
| mit |
maxlikely/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 4 | 2826 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD
import numpy as np
import pylab as pl
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils.fixes import unique
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_depth': 2, 'random_state': 1,
'min_samples_split': 5}
pl.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
pl.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
pl.legend(loc='upper left')
pl.xlabel('Boosting Iterations')
pl.ylabel('Test Set Deviance')
pl.show()
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/utils/deprecation.py | 1 | 2418 | import warnings
__all__ = ["deprecated", ]
class deprecated(object):
    """Decorator marking a function or class as deprecated.

    When the decorated function is called (or the decorated class is
    instantiated) a ``DeprecationWarning`` is issued, and "DEPRECATED"
    (plus the optional ``extra`` message) is prepended to the docstring.

    Use with empty parentheses for the default message::

        @deprecated()
        def some_function(): pass
    """

    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.

    def __init__(self, extra=''):
        """
        Parameters
        ----------
        extra: string
            to be added to the deprecation messages
        """
        self.extra = extra

    def __call__(self, obj):
        # Dispatch on whether we are decorating a class or a callable.
        decorate = self._decorate_class if isinstance(obj, type) else self._decorate_fun
        return decorate(obj)

    def _decorate_class(self, cls):
        """Wrap ``cls.__init__`` so instantiation warns; return cls."""
        msg = "Class %s is deprecated" % cls.__name__
        if self.extra:
            msg += "; %s" % self.extra

        # FIXME: we should probably reset __new__ for full generality
        original_init = cls.__init__

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return original_init(*args, **kwargs)

        wrapped.__name__ = '__init__'
        wrapped.__doc__ = self._update_doc(original_init.__doc__)
        # Keep a handle on the undecorated __init__ for introspection.
        wrapped.deprecated_original = original_init
        cls.__init__ = wrapped
        return cls

    def _decorate_fun(self, fun):
        """Return a warning-emitting wrapper around ``fun``."""
        msg = "Function %s is deprecated" % fun.__name__
        if self.extra:
            msg += "; %s" % self.extra

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return fun(*args, **kwargs)

        wrapped.__name__ = fun.__name__
        wrapped.__dict__ = fun.__dict__
        wrapped.__doc__ = self._update_doc(fun.__doc__)
        return wrapped

    def _update_doc(self, olddoc):
        """Prepend the deprecation notice to an existing docstring."""
        notice = "DEPRECATED"
        if self.extra:
            notice = "%s: %s" % (notice, self.extra)
        if olddoc:
            notice = "%s\n\n%s" % (notice, olddoc)
        return notice
| mit |
DonBeo/scikit-learn | sklearn/neighbors/tests/test_kde.py | 13 | 5622 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
    """Naive O(n^2) reference kernel-density estimate.

    For every query point in ``Y``, sums the chosen kernel evaluated at
    the Euclidean distance to each training point in ``X``, scaled by
    the kernel norm for bandwidth ``h`` divided by the sample count.
    Raises ``ValueError`` for an unrecognised kernel name.
    """
    # Pairwise Euclidean distances, shape (len(Y), len(X)).
    pairwise = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    scale = kernel_norm(h, X.shape[1], kernel) / X.shape[0]

    if kernel == 'gaussian':
        weights = np.exp(-0.5 * (pairwise * pairwise) / (h * h))
    elif kernel == 'tophat':
        weights = (pairwise < h)
    elif kernel == 'epanechnikov':
        weights = (1.0 - (pairwise * pairwise) / (h * h)) * (pairwise < h)
    elif kernel == 'exponential':
        weights = np.exp(-pairwise / h)
    elif kernel == 'linear':
        weights = (1 - pairwise / h) * (pairwise < h)
    elif kernel == 'cosine':
        weights = np.cos(0.5 * np.pi * pairwise / h) * (pairwise < h)
    else:
        raise ValueError('kernel not recognized')
    return scale * weights.sum(-1)
def test_kernel_density(n_samples=100, n_features=3):
    # Compare KernelDensity against the naive reference implementation
    # over every kernel/bandwidth/tolerance combination.  This is a
    # nose-style generator test: each `yield` is a separate test case.
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

            def check_results(kernel, bandwidth, atol, rtol):
                kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                                    atol=atol, rtol=rtol)
                log_dens = kde.fit(X).score_samples(Y)
                assert_allclose(np.exp(log_dens), dens_true,
                                atol=atol, rtol=max(1E-7, rtol))
                assert_allclose(np.exp(kde.score(Y)),
                                np.prod(dens_true),
                                atol=atol, rtol=max(1E-7, rtol))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        # NOTE(review): `breadth_first` is never forwarded
                        # to check_results, so each case runs twice
                        # unchanged — presumably it was meant to be passed
                        # through; confirm.
                        yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    """Check KernelDensity.sample: shape, value range, unsupported kernels."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)

    bandwidth = 0.2

    for kernel in ['gaussian', 'tophat']:
        # draw a sample
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        samp = kde.sample(100)
        assert_equal(X.shape, samp.shape)

        # check that samples are in the right range
        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        # BUG FIX: query the *drawn samples* against the training set.
        # The original queried X against itself, so every distance was
        # exactly zero and the range assertions below were vacuous.
        dist, ind = nbrs.kneighbors(samp, return_distance=True)

        if kernel == 'tophat':
            # tophat samples lie within one bandwidth of their center
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)

    # check unsupported kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, kde.sample, 100)

    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
    """Smoke test: every algorithm/metric pairing either fits or raises."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)  # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in ['euclidean', 'minkowski', 'manhattan',
                       'chebyshev', 'haversine']:
            unsupported = (algorithm == 'kd_tree' and
                           metric not in KDTree.valid_metrics)
            if unsupported:
                # the constructor itself must reject the combination
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                estimator = KernelDensity(algorithm=algorithm, metric=metric)
                estimator.fit(X)
                log_dens = estimator.score_samples(Y)
                assert_equal(log_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
    """Placeholder: KernelDensity.score() is not yet covered here."""
    pass
    # FIXME: implement a test of score(), e.g. along the lines of:
    # np.random.seed(0)
    # X = np.random.random((n_samples, n_features))
    # Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
    """Every invalid constructor argument combination raises ValueError."""
    bad_kwargs = [
        dict(algorithm='blah'),
        dict(bandwidth=0),
        dict(kernel='blah'),
        dict(metric='blah'),
        dict(algorithm='kd_tree', metric='blah'),
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)
def test_kde_pipeline_gridsearch():
    """KernelDensity should play nicely in pipelines and grid searches."""
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    # Identity scaler: exercises pipeline plumbing without rescaling.
    pipeline = make_pipeline(
        StandardScaler(with_mean=False, with_std=False),
        KernelDensity(kernel="gaussian"))
    param_grid = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
    grid = GridSearchCV(pipeline, param_grid=param_grid, cv=5)
    grid.fit(X)
    assert_equal(grid.best_params_['kerneldensity__bandwidth'], .1)
if __name__ == '__main__':
    # Run this module's tests directly through nose.
    import nose
    nose.runmodule()
| bsd-3-clause |
BBN-Q/Auspex | utils/auspex-plot-client.py | 1 | 28017 | #!/usr/bin/env python
# auspex-specific implementation by Graham Rowlands
# (C) 2019 Raytheon BBN Technologies
# Original File:
# embedding_in_qt5.py --- Simple Qt5 application embedding matplotlib canvases
#
# Copyright (C) 2005 Florent Rougon
# 2006 Darren Dale
# 2015 Jens H Nielsen
#
# This file is an example program for matplotlib. It may be used and
# modified with no restriction; raw copies as well as modified versions
# may be distributed without limitation.
from __future__ import unicode_literals
import sys
import os
import time
import random
import json
import ctypes

# When True, opening a new plot set closes previous plot windows.
single_window = True
plot_windows = []

import logging
logger = logging.getLogger('auspex_plot_client')
logger.setLevel(logging.INFO)
logging.basicConfig(format='%(name)s-%(levelname)s: %(asctime)s ----> %(message)s')

from scipy.spatial import Delaunay

import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QIcon

import numpy as np

# BUG FIX: the toolbar must come from the Qt5 backend.  Importing it
# from backend_qt4agg only worked via a deprecated alias and fails on
# modern matplotlib releases.
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import subplots

progname = os.path.basename(sys.argv[0])
progversion = "0.5"

import zmq

# Set by PlotMDI so subwindow creation can reach the MDI area.
main_app_mdi = None
class DataListener(QtCore.QObject):
    """Background ZMQ subscriber streaming plot data for one plot set.

    Subscribes to the plot server's data socket filtered on ``uuid`` and
    re-emits each data message as the ``message`` Qt signal.  Emits
    ``finished`` once a "done" message has arrived for every plot.
    """
    message = QtCore.pyqtSignal(tuple)
    finished = QtCore.pyqtSignal()
    def __init__(self, host, uuid, num_plots, port=7772):
        QtCore.QObject.__init__(self)
        self.uuid = uuid
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.num_plots = num_plots
        self.socket.connect("tcp://{}:{}".format(host, port))
        self.socket.setsockopt_string(zmq.SUBSCRIBE, uuid)
        self.poller = zmq.Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.running = True
    def loop(self):
        # BUG FIX: the done-message counter must persist across poll
        # iterations.  It was previously reset to zero inside the while
        # loop, so ``finished`` could only ever fire when num_plots == 1
        # (or if several "done" messages landed in one poll cycle).
        done_plots = 0
        while self.running:
            socks = dict(self.poller.poll(1000))
            if socks.get(self.socket) == zmq.POLLIN:
                msg = self.socket.recv_multipart()
                msg_type = msg[1].decode()
                uuid = msg[0].decode()
                name = msg[2].decode()
                if msg_type == "done":
                    done_plots += 1
                    if done_plots == self.num_plots:
                        self.finished.emit()
                        logger.debug(f"Data listener thread for {self.uuid} got done message.")
                elif msg_type == "data":
                    result = [name, uuid]
                    # Payload is alternating (metadata, data) frame pairs.
                    num_arrays = int((len(msg) - 3)/2)
                    for i in range(num_arrays):
                        md, data = msg[3+2*i:5+2*i]
                        md = json.loads(md.decode())
                        A = np.frombuffer(data, dtype=md['dtype'])
                        result.append(A)
                    self.message.emit(tuple(result))
        self.socket.close()
        self.context.term()
        logger.debug(f"Data listener thread for {self.uuid} exiting.")
class DescListener(QtCore.QObject):
    """Background ZMQ DEALER client that waits for plot descriptors.

    Announces itself to the plot server, then polls for "new" messages.
    Each message carries a uuid and a JSON plot description and is
    re-emitted as the ``new_plot`` Qt signal; ``first_plot`` fires once,
    on the first descriptor ever received.
    """
    new_plot = QtCore.pyqtSignal(tuple)
    first_plot = QtCore.pyqtSignal()
    def __init__(self, host, port=7771):
        QtCore.QObject.__init__(self)
        self.got_plot = False
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.identity = "Matplotlib_Qt_Client".encode()
        self.socket.connect("tcp://{}:{}".format(host, port))
        self.socket.send_multipart([b"new_client"])
        # LINGER 0: discard unsent messages immediately on close.
        self.socket.setsockopt(zmq.LINGER, 0)
        self.poller = zmq.Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        # Brief pause to let the connection settle before polling.
        time.sleep(0.1)
        logger.debug("desc listener init")
        self.running = True
    def loop(self):
        # Poll with a 1 s timeout so a ``running = False`` request from
        # another thread is noticed promptly.
        logger.debug("desc loop")
        while self.running:
            evts = dict(self.poller.poll(1000))
            if self.socket in evts and evts[self.socket] == zmq.POLLIN:
                msg_type, uuid, desc = [e.decode() for e in self.socket.recv_multipart()]
                logger.debug(f"GOT: {msg_type}, {uuid}, {desc}")
                if msg_type == "new":
                    if not self.got_plot:
                        self.got_plot = True
                        self.first_plot.emit()
                    self.new_plot.emit(tuple([uuid, desc]))
        logger.debug("Desc listener at end")
        self.socket.close()
        self.context.term()
def label_offset(ax):
    """Fold the scientific-notation offset into the axis labels of ``ax``.

    Hides matplotlib's separate "1e3"-style offset text and instead
    appends " (10^k)" to the x/y axis labels, keeping them current as
    the axis limits (and hence the offsets) change.
    """
    ax.xaxis.offsetText.set_visible(False)
    ax.yaxis.offsetText.set_visible(False)

    def update_label(event_axes):
        if not event_axes:
            return
        old_xlabel = event_axes.get_xlabel()
        old_ylabel = event_axes.get_ylabel()
        # Strip any " (10^k)" suffix appended by a previous invocation.
        if " (10" in old_xlabel:
            old_xlabel = old_xlabel.split(" (10")[0]
        if " (10" in old_ylabel:
            old_ylabel = old_ylabel.split(" (10")[0]
        offset_x = event_axes.xaxis.get_major_formatter().orderOfMagnitude
        offset_y = event_axes.yaxis.get_major_formatter().orderOfMagnitude
        offset_x = r" (10$^{" + str(offset_x) + r"}$)" if offset_x != 0 else ''
        offset_y = r" (10$^{" + str(offset_y) + r"}$)" if offset_y != 0 else ''
        event_axes.set_xlabel(old_xlabel + offset_x)
        event_axes.set_ylabel(old_ylabel + offset_y)
        event_axes.figure.canvas.draw()

    # BUG FIX: register the limit-change callbacks once, here.  The
    # original connected them from *inside* update_label, so the handler
    # was never registered at all (dead code) and, had it ever run, it
    # would have stacked a duplicate callback on every limit change.
    ax.callbacks.connect("ylim_changed", update_label)
    ax.callbacks.connect("xlim_changed", update_label)
class MplCanvas(FigureCanvas):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).

    Base canvas that lays out one, two, or four axes depending on
    ``plot_mode`` and records, per axis, a panel name and the numpy
    projection (real/imag/abs/angle) that subclasses apply to incoming
    complex data.
    """
    def __init__(self, parent=None, width=5, height=4, dpi=100, plot_mode="quad"):
        # self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.plots = []
        self.dpi = dpi
        # Each branch defines self.axes plus the parallel func_names /
        # plot_funcs lists consumed by the subclasses' update methods.
        if plot_mode == "quad":
            self.fig, _axes = subplots(2, 2, figsize=(width, height), sharex=True, sharey=False, constrained_layout=False)
            self.real_axis = _axes[0,0]
            self.imag_axis = _axes[0,1]
            self.abs_axis = _axes[1,0]
            self.phase_axis = _axes[1,1]
            self.axes = [self.real_axis, self.imag_axis, self.abs_axis, self.phase_axis]
            self.func_names = ["Real", "Imag", "Abs", "Phase"]
            self.plot_funcs = [np.real, np.imag, np.abs, np.angle]
        elif plot_mode == "real":
            self.fig, self.real_axis = subplots(1, 1, figsize=(width, height), sharex=True, sharey=False, constrained_layout=False)
            self.axes = [self.real_axis]
            self.func_names = ["Real"]
            self.plot_funcs = [np.real]
        elif plot_mode == "imag":
            self.fig, self.imag_axis = subplots(1, 1, figsize=(width, height), sharex=True, sharey=False, constrained_layout=False)
            self.axes = [self.imag_axis]
            self.func_names = ["Imag"]
            self.plot_funcs = [np.imag]
        elif plot_mode == "amp":
            self.fig, self.abs_axis = subplots(1, 1, figsize=(width, height), sharex=True, sharey=False, constrained_layout=False)
            self.axes = [self.abs_axis]
            self.func_names = ["Amp"]
            self.plot_funcs = [np.abs]
        elif plot_mode == "real/imag":
            self.fig, _axes = subplots(1, 2, figsize=(width, height), sharex=True, sharey=False, constrained_layout=False)
            self.real_axis = _axes[0]
            self.imag_axis = _axes[1]
            self.axes = [self.real_axis, self.imag_axis]
            self.func_names = ["Real", "Imag"]
            self.plot_funcs = [np.real, np.imag]
        elif plot_mode == "amp/phase":
            self.fig, _axes = subplots(1, 2, figsize=(width, height), sharex=True, sharey=False, constrained_layout=False)
            self.abs_axis = _axes[0]
            self.phase_axis = _axes[1]
            self.axes = [self.abs_axis, self.phase_axis]
            self.func_names = ["Amp", "Phase"]
            self.plot_funcs = [np.abs, np.angle]
        # NOTE(review): an unrecognized plot_mode leaves self.axes unset
        # and the loop below raises AttributeError — confirm callers
        # validate plot_mode upstream.
        for ax in self.axes:
            # ax.ticklabel_format(useOffset=False)
            ax._orig_xlabel = ""
            ax._orig_ylabel = ""
            label_offset(ax)
        self.fig.set_dpi(dpi)
        self.compute_initial_figure()
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # for ax in self.axes:
        #     print("canvac init", ax.xaxis.get_offset_text(), ax.yaxis.get_offset_text())
    def compute_initial_figure(self):
        # Hook for subclasses to create their initial artists.
        pass
class Canvas1D(MplCanvas):
    """Line-plot canvas: one curve per component (real/imag/abs/phase)."""

    def compute_initial_figure(self):
        # Seed each axis with a placeholder line so later updates can
        # simply swap the data arrays in place.
        for axis in self.axes:
            line, = axis.plot([0, 0, 0], marker="o", markersize=4)
            axis.ticklabel_format(style='sci', axis='x', scilimits=(-3, 3))
            axis.ticklabel_format(style='sci', axis='y', scilimits=(-3, 3))
            self.plots.append(line)

    def update_figure(self, data):
        """Replace curve data with (x_data, y_data) and rescale."""
        x_data, y_data = data
        for line, axis, component in zip(self.plots, self.axes, self.plot_funcs):
            line.set_xdata(x_data)
            line.set_ydata(component(y_data))
            axis.relim()
            axis.autoscale_view()
        self.draw()
        self.flush_events()

    def set_desc(self, desc):
        """Apply axis labels from the descriptor and reset curves to NaN."""
        for axis, name in zip(self.axes, self.func_names):
            if 'x_label' in desc:
                axis.set_xlabel(desc['x_label'])
            if 'y_label' in desc:
                axis.set_ylabel(name + " " + desc['y_label'])
        xs = np.linspace(desc['x_min'], desc['x_max'], desc['x_len'])
        for line in self.plots:
            line.set_xdata(xs)
            line.set_ydata(np.nan * xs)
class CanvasManual(MplCanvas):
    """Canvas for 'manual' plots: a single row of axes with named traces.

    Unlike the other canvases, the number of axes and the traces drawn
    are driven entirely by the plot descriptor.
    """
    def __init__(self, parent=None, width=5, height=4, dpi=100, numplots=1):
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = []
        for n in range(numplots):
            # 1 x numplots grid; add_subplot's index is 1-based.
            self.axes += [self.fig.add_subplot(100+10*(numplots)+n+1)]
            self.axes[n].ticklabel_format(style='sci', axis='x', scilimits=(-3,3))
            self.axes[n].ticklabel_format(style='sci', axis='y', scilimits=(-3,3))
        # Maps trace name -> {'plot': Line2D, 'axis_num': int}.
        self.traces = {}
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
    def compute_initial_figure(self):
        # Traces are created lazily in set_desc; nothing to draw yet.
        pass
    def update_trace(self, trace_name, x_data, y_data):
        # Swap the named trace's data in place and rescale its axis.
        self.traces[trace_name]['plot'].set_xdata(x_data)
        self.traces[trace_name]['plot'].set_ydata(y_data)
        curr_axis = self.axes[self.traces[trace_name]['axis_num']]
        curr_axis.relim()
        curr_axis.autoscale_view()
        if len(self.traces)>1:
            curr_axis.legend()
        self.draw()
        self.flush_events()
    def set_desc(self, desc):
        # Per-axis labels/limits come as lists indexed by axis number.
        for k, ax in enumerate(self.axes):
            if 'x_label' in desc.keys():
                ax._orig_xlabel = desc['x_label'][k]
                ax.set_xlabel(desc['x_label'][k])
            if 'y_label' in desc.keys():
                ax._orig_ylabel = desc['y_label'][k]
                ax.set_ylabel(desc['y_label'][k])
            if 'y_lim' in desc.keys() and desc['y_lim']:
                ax.set_ylim(*desc['y_lim'])
        for trace in desc['traces']: # relink traces and axes
            self.traces[trace['name']] = {'plot': self.axes[trace['axis_num']].plot([], label=trace['name'], **trace['matplotlib_kwargs'])[0], 'axis_num': trace['axis_num']}
class Canvas2D(MplCanvas):
    """Image canvas: one imshow panel per component of a 2D sweep."""
    def compute_initial_figure(self):
        # Placeholder images; set_desc() rebuilds them with real extents.
        for ax in self.axes:
            plt = ax.imshow(np.zeros((10,10)))
            ax.ticklabel_format(style='sci', axis='x', scilimits=(-3,3), useOffset=False)
            ax.ticklabel_format(style='sci', axis='y', scilimits=(-3,3), useOffset=False)
            self.plots.append(plt)
    def update_figure(self, data):
        # Incoming flat image data is reshaped to (rows=y, cols=x).
        x_data, y_data, im_data = data
        im_data = im_data.reshape((len(y_data), len(x_data)), order='c')
        for plt, f in zip(self.plots, self.plot_funcs):
            plt.set_data(f(im_data))
            plt.autoscale()
        self.draw()
        self.flush_events()
    def set_desc(self, desc):
        self.aspect = "auto"# (desc['x_max']-desc['x_min'])/(desc['y_max']-desc['y_min'])
        self.extent = (desc['x_min'], desc['x_max'], desc['y_min'], desc['y_max'])
        self.xlen = desc['x_len']
        self.ylen = desc['y_len']
        self.plots = []
        for ax in self.axes:
            ax.clear()
            ax.ticklabel_format(style='sci', axis='x', scilimits=(-3,3), useOffset=False)
            ax.ticklabel_format(style='sci', axis='y', scilimits=(-3,3), useOffset=False)
            # NOTE(review): imshow takes (rows=y, cols=x); this initial
            # placeholder is (xlen, ylen).  Harmless since update_figure
            # replaces the data with the correct shape, but confirm for
            # non-square grids.
            plt = ax.imshow(np.zeros((self.xlen, self.ylen)),
                animated=True, aspect=self.aspect, extent=self.extent, origin="lower")
            self.plots.append(plt)
        self.draw() # For offsets to update
        # Fold the tick-formatter order-of-magnitude into the labels,
        # since the usual offset text is hidden by label_offset().
        for ax, name in zip(self.axes, self.func_names):
            offset_x = ax.xaxis.get_major_formatter().orderOfMagnitude
            offset_y = ax.yaxis.get_major_formatter().orderOfMagnitude
            if offset_x != 0:
                offset_x = r" (10$^{"+ str(offset_x) + r"}$)"
            else:
                offset_x = ''
            if offset_y != 0:
                offset_y = r" (10$^{"+ str(offset_y) + r"}$)"
            else:
                offset_y = ''
            if 'x_label' in desc.keys():
                ax._orig_xlabel = desc['x_label']
                ax.set_xlabel(desc['x_label'] + offset_x)
            if 'y_label' in desc.keys():
                ax._orig_ylabel = name + " " + desc['y_label']
                ax.set_ylabel(name + " " + desc['y_label'] + offset_y)
class CanvasMesh(MplCanvas):
    """Canvas for unstructured (x, y, z) data drawn via a Delaunay mesh."""
    def compute_initial_figure(self):
        # data = np.array([[0,0,0],[0,1,0],[1,1,0],[1,0,0]])
        # self.update_figure(np.array(data))
        pass
    def update_figure(self, data):
        # Expected xs, ys, zs coming in as
        # data = np.array([xs, ys, zs]).transpose()
        data = data.reshape((-1, 3), order='c')
        points = data[:,0:2]
        mesh = self.scaled_Delaunay(points)
        xs = mesh.points[:,0]
        ys = mesh.points[:,1]
        # Rebuild the triangulated pseudocolor plot from scratch on each
        # update; tripcolor artists cannot be updated in place.
        for ax, f in zip(self.axes, self.plot_funcs):
            ax.clear()
            ax.tripcolor(xs, ys, mesh.simplices, f(data[:,2]), cmap="RdGy", shading="flat")
            ax.autoscale()
        self.draw()
        self.flush_events()
    def set_desc(self, desc):
        # Only labels are configured here; the mesh is rebuilt per update.
        self.plots = []
        for ax, name in zip(self.axes, self.func_names):
            if 'x_label' in desc.keys():
                ax.set_xlabel(desc['x_label'])
            if 'y_label' in desc.keys():
                ax.set_ylabel(name + " " + desc['y_label'])
            ax.ticklabel_format(style='sci', axis='x', scilimits=(-3,3))
            ax.ticklabel_format(style='sci', axis='y', scilimits=(-3,3))
    def scaled_Delaunay(self, points):
        """ Return a scaled Delaunay mesh and scale factors """
        # Rescale each coordinate to O(1) before triangulating, then map
        # the mesh points back to original units afterwards.
        # NOTE(review): divides by the per-axis mean — a zero-mean
        # coordinate (e.g. a symmetric sweep) divides by zero; confirm.
        scale_factors = []
        points = np.array(points)
        for i in range(points.shape[1]):
            scale_factors.append(1.0/np.mean(points[:,i]))
            points[:,i] = points[:,i]*scale_factors[-1]
        mesh = Delaunay(points)
        for i in range(points.shape[1]):
            mesh.points[:,i] = mesh.points[:,i]/scale_factors[i]
        return mesh
class MatplotWindowMixin(object):
    """Shared plumbing for plot windows: layout, tabs, and the ZMQ feed.

    Mixed into both the top-level window and the MDI subwindow variants.
    """
    def build_main_window(self, setMethod = None):
        # Central widget plus a vertical layout; ``setMethod`` installs
        # it into the host (setCentralWidget for a QMainWindow,
        # setWidget for a QMdiSubWindow).
        self.main_widget = QtWidgets.QWidget(self)
        self.main_widget.setMinimumWidth(800)
        self.main_widget.setMinimumHeight(600)
        self.layout = QtWidgets.QVBoxLayout(self.main_widget)
        self.main_widget.setFocus()
        if setMethod:
            setMethod(self.main_widget)
    def init_comms(self):
        # Per-window ZMQ state; the data listener is created lazily in
        # listen_for_data().
        self.context = zmq.Context()
        self.uuid = None
        self.data_listener_thread = None
    def toggleAutoClose(self, state):
        # Flip the module-wide auto-close preference.
        global single_window
        single_window = state
    def listen_for_data(self, uuid, num_plots, address="localhost", data_port=7772):
        # Spin up a DataListener on its own QThread and wire its signals
        # into this window.
        self.uuid = uuid
        self.data_listener_thread = QtCore.QThread()
        self.Datalistener = DataListener(address, uuid, num_plots, port=data_port)
        self.Datalistener.moveToThread(self.data_listener_thread)
        self.data_listener_thread.started.connect(self.Datalistener.loop)
        self.Datalistener.message.connect(self.data_signal_received)
        self.Datalistener.finished.connect(self.stop_listening)
        QtCore.QTimer.singleShot(0, self.data_listener_thread.start)
    def construct_plots(self, plot_desc):
        # Build one canvas (tab) per plot in the descriptor dict.
        self.toolbars = []
        self.canvas_by_name = {}
        # Purge everything in the layout
        for i in reversed(range(self.layout.count())):
            widgetToRemove = self.layout.itemAt( i ).widget()
            self.layout.removeWidget( widgetToRemove )
            widgetToRemove.setParent( None )
        self.tabs = QtWidgets.QTabWidget(self.main_widget)
        # NOTE(review): a 'standard' plot with plot_dims outside {1, 2}
        # (or an unknown plot_type) leaves ``canvas`` unbound and raises
        # NameError below — confirm descriptors are validated upstream.
        for name, desc in plot_desc.items():
            if desc['plot_type'] == "standard":
                if desc['plot_dims'] == 1:
                    canvas = Canvas1D(self.main_widget, width=5, height=4, dpi=100, plot_mode=desc['plot_mode'])
                if desc['plot_dims'] == 2:
                    canvas = Canvas2D(self.main_widget, width=5, height=4, dpi=100, plot_mode=desc['plot_mode'])
            elif desc['plot_type'] == "manual":
                canvas = CanvasManual(self.main_widget, width=5, height=4, dpi=100, numplots=desc['numplots'])
            elif desc['plot_type'] == "mesh":
                canvas = CanvasMesh(self.main_widget, width=5, height=4, dpi=100, plot_mode=desc['plot_mode'])
            nav = NavigationToolbar(canvas, self)
            canvas.set_desc(desc)
            self.toolbars.append(nav)
            self.tabs.addTab(canvas, name)
            self.layout.addWidget(nav)
            self.canvas_by_name[name] = canvas
        self.layout.addWidget(self.tabs)
        self.switch_toolbar()
        self.tabs.currentChanged.connect(self.switch_toolbar)
        self.statusBar = QtWidgets.QStatusBar(self.main_widget)
        self.layout.addWidget(self.statusBar)
    def data_signal_received(self, message):
        # Dispatch an incoming (plot_name, uuid, *arrays) tuple to the
        # matching canvas; "plot:trace" names address manual traces.
        plot_name = message[0]
        uuid = message[1]
        data = message[2:]
        if uuid == self.uuid:
            try:
                # If we see a colon, then we must look for a named trace
                if ":" in plot_name:
                    plot_name, trace_name = plot_name.split(":")
                    self.canvas_by_name[plot_name].update_trace(trace_name, *data)
                else:
                    if isinstance(self.canvas_by_name[plot_name], CanvasMesh):
                        self.canvas_by_name[plot_name].update_figure(data[0])
                    else:
                        self.canvas_by_name[plot_name].update_figure(data)
            except Exception as e:
                # Best-effort: report plotting errors in the status bar
                # rather than killing the GUI.
                self.statusBar.showMessage("Exception while plotting {}. Length of data: {}".format(e, len(data)), 1000)
    def switch_toolbar(self):
        # Show only the toolbar belonging to the active tab.
        if len(self.toolbars) > 0:
            for toolbar in self.toolbars:
                toolbar.setVisible(False)
            self.toolbars[self.tabs.currentIndex()].setVisible(True)
    def stop_listening(self):
        # Idempotent shutdown of the data listener thread.
        if self.data_listener_thread and self.Datalistener.running:
            # update status bar if possible
            try:
                self.statusBar.showMessage("Disconnecting from server.", 10000)
            except:
                pass
            self.Datalistener.running = False
            self.data_listener_thread.quit()
            self.data_listener_thread.wait()
    def closeEvent(self, event):
        self._quit()
class MatplotClientSubWindow(MatplotWindowMixin, QtWidgets.QMdiSubWindow):
    """Plot window hosted as a subwindow inside the MDI area."""
    def __init__(self):
        # BUG FIX: this class derives from QMdiSubWindow, but the
        # original code called QtWidgets.QMainWindow.__init__ instead,
        # skipping the actual base-class constructor.  Use super() so
        # the correct MRO chain (mixin + QMdiSubWindow) is initialized.
        super(MatplotClientSubWindow, self).__init__()
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle("Auspex Plotting")
        self.build_main_window(self.setWidget)
        self.init_comms()
    def _quit(self):
        # Detach from the data stream, then close the subwindow.
        self.stop_listening()
        self.close()
class MatplotClientWindow(MatplotWindowMixin, QtWidgets.QMainWindow):
    """Top-level plot window with File/Settings/Debug menus."""
    def __init__(self, close_window=None):
        global single_window
        # Default the per-window auto-close flag from the global setting.
        if close_window == None:
            close_window = single_window
        self.close_window = close_window
        QtWidgets.QMainWindow.__init__(self)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle("Auspex Plotting")
        self.file_menu = self.menuBar().addMenu('&File')
        self.file_menu.addAction('&Quit', self._quit,
                                 QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
        self.file_menu.addAction('&Close All', close_all_plotters,
                                 QtCore.Qt.SHIFT + QtCore.Qt.CTRL + QtCore.Qt.Key_W)
        self.recent = self.file_menu.addMenu("Open Recent")
        self.settings_menu = self.menuBar().addMenu('&Settings')
        auto_close = QtWidgets.QAction('Auto Close Plots', self, checkable=True)
        auto_close.setChecked(close_window)
        self.settings_menu.addAction(auto_close)
        self.debug_menu = self.menuBar().addMenu('&Debug')
        self.debug_menu.addAction('&Debug', self._debug)
        auto_close.triggered.connect(self.toggleAutoClose)
        self.build_main_window(self.setCentralWidget)
        self.init_comms()
    def toggleAutoClose(self, state):
        # Same behavior as the mixin version; kept here so the menu
        # action resolves on this class directly.
        global single_window
        single_window = state
    def _debug(self):
        # Drop into an interactive debugger from the Debug menu.
        import ipdb; ipdb.set_trace();
    def _quit(self):
        # Stop the data feed; if this was the last plot window, bring
        # back the module-level "waiting for plots" window.
        self.stop_listening()
        plotters = [w for w in QtWidgets.QApplication.topLevelWidgets() if isinstance(w, MatplotClientWindow)]
        if len(plotters) <= 1:
            # This is the last plotter window:
            wait_window.show()
        self.close()
def new_plotter_window(message):
    """Open a new top-level plot window for a freshly announced plot set.

    ``message`` is a (uuid, json_desc) tuple from DescListener.  With
    auto-close enabled, previously opened plot windows are closed first;
    'manual' plots disable auto-close so they can accumulate.
    """
    global single_window
    reset_windows = False
    close_window = single_window
    uuid, desc = message
    desc = json.loads(desc)
    for plot in desc.values():
        if single_window and plot['plot_type'] == 'manual':
            # Manual plots persist; never auto-close them.
            close_window = False
            reset_windows = True
            break
    pw = MatplotClientWindow(close_window = close_window)
    pw.setWindowTitle("%s" % progname)
    pw.show()
    pw.setWindowState(pw.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
    pw.activateWindow()
    pw.construct_plots(desc)
    pw.listen_for_data(uuid, len(desc))
    # BUG FIX: iterate over a copy.  Calling list.remove() while
    # iterating the same list skips the element following each removal,
    # leaving some windows open that should have been closed.
    for w in list(plot_windows):
        if w.close_window or reset_windows:
            w.closeEvent(0)
            plot_windows.remove(w)
    plot_windows.append(pw)
def new_plotter_window_mdi(message):
    """Create a plot subwindow inside the MDI area for a new descriptor."""
    uuid, desc = message
    desc = json.loads(desc)
    pw = MatplotClientSubWindow()
    pw.setWindowTitle("%s" % progname)
    pw.construct_plots(desc)
    pw.listen_for_data(uuid, len(desc))
    # Auto-close mode: drop all previous subwindows before adding this one.
    if single_window:
        for window in main_app_mdi.subWindowList():
            window.close()
    main_app_mdi.addSubWindow(pw)
    pw.show()
def close_all_plotters():
    """Close every tracked plot window, then re-show the wait screen."""
    # BUG FIX: iterate over a copy.  list.remove() during iteration
    # skips the element after each removal, so some windows survived.
    for w in list(plot_windows):
        w.closeEvent(0)
        time.sleep(0.01)
        plot_windows.remove(w)
    # Also sweep any top-level plot windows not tracked in plot_windows.
    plotters = [w for w in QtWidgets.QApplication.topLevelWidgets() if isinstance(w, MatplotClientWindow)]
    for w in plotters:
        w.closeEvent(0)
        time.sleep(0.01)
    wait_window.show()
class ListenerMixin:
    """Owns a DescListener on a background QThread.

    Shared by the wait widget and the MDI main window; the subclass
    supplies the callback invoked for each new plot descriptor.
    """
    def start_listener(self, new_plot_callback):
        # Start listener thread
        self.desc_listener_thread = QtCore.QThread()
        self.Desclistener = DescListener("localhost", 7771 )
        self.Desclistener.moveToThread(self.desc_listener_thread)
        self.desc_listener_thread.started.connect(self.Desclistener.loop)
        self.Desclistener.new_plot.connect(new_plot_callback)
        QtCore.QTimer.singleShot(0, self.desc_listener_thread.start)
    def stop_listening(self, _):
        # ``_`` absorbs the Qt signal/slot argument; it is unused.
        self.Desclistener.running = False
        self.desc_listener_thread.quit()
        self.desc_listener_thread.wait()
    def closeEvent(self, ce):
        # NOTE(review): assumes start_listener() ran first so the
        # attribute exists — confirm all subclasses call it in __init__.
        if self.desc_listener_thread:
            self.stop_listening(True)
        self.close()
class PlotMDI(ListenerMixin,QtWidgets.QMainWindow):
    """Main window hosting all plots as subwindows of one MDI area."""
    def __init__(self, parent = None):
        global main_app_mdi
        super(PlotMDI, self).__init__(parent)
        self.mdi = QtWidgets.QMdiArea()
        # Publish the MDI area so new_plotter_window_mdi can add to it.
        main_app_mdi = self.mdi
        self.setCentralWidget(self.mdi)
        self.setWindowTitle("Auspex Plots")
        self.file_menu = self.menuBar().addMenu('&File')
        self.file_menu.addAction('&Quit', self.close,
                                 QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
        self.file_menu.addAction('&Close All', self.close_all_windows,
                                 QtCore.Qt.SHIFT + QtCore.Qt.CTRL + QtCore.Qt.Key_W)
        self.settings_menu = self.menuBar().addMenu('&Settings')
        auto_close = QtWidgets.QAction('Auto Close Plots', self, checkable=True)
        auto_close.setChecked(single_window)
        self.settings_menu.addAction(auto_close)
        auto_close.triggered.connect(self.toggleAutoClose)
        self.windows_menu = self.menuBar().addMenu('&Windows')
        self.windows_menu.addAction("Cascade", self.mdi.cascadeSubWindows)
        self.windows_menu.addAction("Tiled", self.mdi.tileSubWindows)
        self.start_listener(new_plotter_window_mdi)
    def toggleAutoClose(self, state):
        # Flip the module-wide auto-close preference.
        global single_window
        single_window = state
    def close_all_windows(self):
        # Close every plot subwindow currently in the MDI area.
        for window in self.mdi.subWindowList():
            window.close()
class WaitAndListenWidget(ListenerMixin,QtWidgets.QWidget):
    """Splash widget shown while waiting for the first plot descriptor.

    Hides itself once a plot arrives; new_plotter_window() re-shows it
    when the last plot window is closed.
    """
    def __init__(self, parent=None):
        super(WaitAndListenWidget, self).__init__(parent)
        layout = QtWidgets.QVBoxLayout(self)
        # Create a progress bar and a button and add them to the main layout
        self.progressBar = QtWidgets.QProgressBar(self)
        # Range (0, 0) puts the bar in indeterminate "busy" mode.
        self.progressBar.setRange(0,0)
        self.progressBar.setValue(0)
        layout.addWidget(QtWidgets.QLabel("Waiting for an available Auspex plot..."))
        layout.addWidget(self.progressBar)
        button = QtWidgets.QPushButton("Quit", self)
        layout.addWidget(button)
        button.clicked.connect(self.closeEvent)
        self.start_listener(new_plotter_window)
        # Start listener thread
        self.Desclistener.new_plot.connect(self.done_waiting)
    def done_waiting(self, thing=None):
        # A plot arrived: hide the splash (the plot window takes over).
        self.hide()
if __name__ == '__main__':
    # Entry point: start Qt, then either the MDI container (--mdi) or the
    # classic one-window-per-plot wait screen.
    qApp = QtWidgets.QApplication(sys.argv)

    # Setup icon
    png_path = os.path.join(os.path.dirname(__file__), "../src/auspex/assets/plotter_icon.png")
    qApp.setWindowIcon(QIcon(png_path))

    # Convince windows that this is a separate application to get the task bar icon working
    # https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105
    if (os.name == 'nt'):
        myappid = u'BBN.auspex.auspex-plot-client.0001' # arbitrary string
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

    if '--mdi' in sys.argv:
        wait_window = PlotMDI()
    else:
        wait_window = WaitAndListenWidget()
    wait_window.show()
    sys.exit(qApp.exec_())
| apache-2.0 |
JosmanPS/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
    """Time Lasso vs. LassoLars fits over grids of sample/feature counts.

    Returns two lists of fit times in seconds (Lasso, LassoLars), in the
    order the (ns, nf) combinations are visited.

    NOTE(review): ``Lasso`` and ``LassoLars`` are resolved from module
    globals; they are imported only inside the ``__main__`` guard, so
    this function raises NameError if imported and called directly.
    """
    lasso_results = []
    lars_lasso_results = []
    it = 0
    for ns in n_samples:
        for nf in n_features:
            it += 1
            print('==================')
            # NOTE(review): the reported total is max(len, len), not the
            # full product of combinations — misleading when both grids
            # have more than one entry.
            print('Iteration %s of %s' % (it, max(len(n_samples),
                                          len(n_features))))
            print('==================')
            n_informative = nf // 10
            X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
                                          n_informative=n_informative,
                                          noise=0.1, coef=True)
            X /= np.sqrt(np.sum(X ** 2, axis=0))  # Normalize data
            # Collect garbage before timing so GC pauses don't pollute
            # the measurement.
            gc.collect()
            print("- benchmarking Lasso")
            clf = Lasso(alpha=alpha, fit_intercept=False,
                        precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lasso_results.append(time() - tstart)
            gc.collect()
            print("- benchmarking LassoLars")
            clf = LassoLars(alpha=alpha, fit_intercept=False,
                            normalize=False, precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lars_lasso_results.append(time() - tstart)
    return lasso_results, lars_lasso_results
if __name__ == '__main__':
    from sklearn.linear_model import Lasso, LassoLars
    import pylab as pl

    alpha = 0.01  # regularization parameter

    # Benchmark 1: fixed feature count, growing sample count.
    n_features = 10
    # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``int`` is exactly what the alias meant.
    list_n_samples = np.linspace(100, 1000000, 5).astype(int)
    lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
                                            [n_features], precompute=True)
    pl.figure('scikit-learn LASSO benchmark results')
    pl.subplot(211)
    pl.plot(list_n_samples, lasso_results, 'b-',
                            label='Lasso')
    pl.plot(list_n_samples, lars_lasso_results, 'r-',
                            label='LassoLars')
    pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    pl.axis('tight')

    # Benchmark 2: fixed sample count, growing feature count.
    n_samples = 2000
    list_n_features = np.linspace(500, 3000, 5).astype(int)
    lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
                                            list_n_features, precompute=False)
    pl.subplot(212)
    pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
    pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
    pl.title('%d samples, alpha=%s' % (n_samples, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of features')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
krez13/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
    """ Tokenizer that maps all numeric tokens to a placeholder.

    For many applications, tokens that begin with a number are not directly
    useful, but the fact that such a token exists can be relevant. By applying
    this form of dimensionality reduction, some methods may perform better.
    """
    # Word tokens of at least two word characters.
    word_re = re.compile(u'(?u)\\b\\w\\w+\\b')
    # Collapse tokens that start with a digit (or underscore) into a
    # single placeholder symbol.
    return ["#NUMBER" if tok[0] in "0123456789_" else tok
            for tok in word_re.findall(doc)]
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
              'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
              'comp.windows.x', 'misc.forsale', 'rec.autos',
              'rec.motorcycles', 'rec.sport.baseball',
              'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
              'sci.med', 'sci.space', 'soc.religion.christian',
              'talk.politics.guns', 'talk.politics.mideast',
              'talk.politics.misc', 'talk.religion.misc']
# Downloads the dataset on first use (cached afterwards).
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target

vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
                             tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
                                 svd_method='arpack', random_state=0)
# MiniBatchKMeans serves as the baseline for the V-measure comparison.
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
                         random_state=0)

print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)

print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_cocluster, y_true)))

print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_kmeans, y_true)))

feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
    """Return the normalized-cut score of bicluster *i* (lower is better).

    The score is the weight of edges leaving the bicluster divided by the
    weight of edges inside it.  Empty biclusters are given the largest
    representable float so they sort last.

    Reads the module-level ``cocluster`` model and document-term matrix ``X``.
    """
    rows, cols = cocluster.get_indices(i)
    # Explicit emptiness tests: ``np.any(rows)`` is False for the non-empty
    # index array [0], which would wrongly discard a bicluster containing
    # only the first row or column.
    if len(rows) == 0 or len(cols) == 0:
        import sys  # lazy import, only needed for the degenerate case
        return sys.float_info.max
    row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
    col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
    # Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
    # much faster in scipy <= 0.16
    weight = X[rows][:, cols].sum()
    cut = (X[row_complement][:, cols].sum() +
           X[rows][:, col_complement].sum())
    return cut / weight
def most_common(d):
    """Items of a defaultdict(int) with the highest values.

    Like Counter.most_common in Python >=2.7: returns (key, count) pairs
    sorted by count, largest first.
    """
    # d.items() behaves identically here on Python 2 and 3, removing the
    # dependency on the six-style ``iteritems`` helper.
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
# Score every bicluster and keep the five with the lowest normalized cut.
bicluster_ncuts = list(bicluster_ncut(i)
                       for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]

print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
    n_rows, n_cols = cocluster.get_shape(cluster)
    cluster_docs, cluster_words = cocluster.get_indices(cluster)
    if not len(cluster_docs) or not len(cluster_words):
        continue

    # categories
    counter = defaultdict(int)
    for i in cluster_docs:
        counter[document_names[i]] += 1
    # Top three categories with their share of the bicluster's documents.
    cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
                           for name, c in most_common(counter)[:3])

    # words
    out_of_cluster_docs = cocluster.row_labels_ != cluster
    out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
    word_col = X[:, cluster_words]
    # Score = in-cluster tf-idf mass minus out-of-cluster mass per word.
    word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
                           word_col[out_of_cluster_docs, :].sum(axis=0))
    word_scores = word_scores.ravel()
    # Ten highest-scoring words, best first.
    important_words = list(feature_names[cluster_words[i]]
                           for i in word_scores.argsort()[:-11:-1])

    print("bicluster {} : {} documents, {} words".format(
        idx, n_rows, n_cols))
    print("categories : {}".format(cat_string))
    print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
RyanChinSang/ECNG3020-ORSS4SCVI | Stable/v01/models/VideoStream.py | 2 | 3245 | import cv2
from threading import Thread
class VideoStream(object):
    """Threaded wrapper around ``cv2.VideoCapture``.

    A background daemon thread keeps grabbing frames so ``read()`` always
    returns the most recently captured frame without blocking on the device.
    """

    def __init__(self, src=0, height=None, width=None, ratio=None):
        # src: device index or stream URL, forwarded to cv2.VideoCapture.
        # height/width/ratio: optional capture-resolution hints; see config().
        cv2.setUseOptimized(True)
        self.stream = cv2.VideoCapture(src)
        self.config(dim=None, height=height, width=width, ratio=ratio)
        # Prime the first frame synchronously so read() has data immediately.
        (self.grabbed, self.frame) = self.stream.read()
        # A failed first grab marks the stream as already released.
        self.released = not self.grabbed

    def start(self):
        """Start the background frame-grabbing thread and return self."""
        Thread(target=self.update, args=(), daemon=True).start()
        return self

    def update(self):
        """Thread body: overwrite self.frame until release() is called."""
        while True:
            if self.released:
                return
            (self.grabbed, self.frame) = self.stream.read()

    def read(self, width=None, height=None, ratio=None):
        """Return (ok, frame), optionally resizing the frame on the fly."""
        return (not self.released), self.resize(frame=self.frame, width=width, height=height, ratio=ratio)

    def release(self):
        """Release the capture device and let the update thread exit."""
        self.stream.release()
        self.released = True

    def isOpened(self):
        """Mirror of cv2.VideoCapture.isOpened for the wrapped stream."""
        return not self.released

    def config(self, dim, height, width, ratio):
        """Request a capture resolution from the device.

        ``dim`` is a (height, width) pair or None; when None it is derived
        from the height/width/ratio combination that was supplied.
        """
        if ratio is None:
            if height and width:
                # NOTE(review): height * (width / height) == width, so this is
                # effectively (height, width); presumably written this way to
                # emphasise the aspect relation — confirm intent.
                dim = (self.round_up(height), self.round_up(height * float(width / height)))
            elif not height and not width:
                pass
            else:
                print("WARNING: Insufficient configuration parameters. The default was used.")
        else:
            # ratio appears to mean width / height — TODO confirm.
            if height:
                dim = (self.round_up(height), self.round_up(height * float(ratio)))
            elif width:
                dim = (self.round_up(width / float(ratio)), self.round_up(width))
        if dim is not None:
            # dim is stored as (height, width).
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, dim[0])
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, dim[1])

    def resize(self, frame, width, height, ratio):
        """Resize ``frame`` per the width/height/ratio combination.

        Falls back to the frame's native size when parameters are missing
        or inconsistent.  ``dim`` is kept as (height, width) throughout.
        """
        dim = (dheight, dwidth) = frame.shape[:2]
        if ratio is None:
            if width and height:
                dim = (height, width)
            elif width and height is None:
                # Scale height to preserve the native aspect ratio.
                dim = ((dheight * (width / dwidth)), width)
            elif width is None and height:
                dim = (height, (dwidth * (height / dheight)))
        else:
            if width is None and height is None:
                # NOTE(review): keeps native height but derives width from
                # dheight * ratio rather than dwidth — confirm intent.
                dim = (dheight, (dheight * ratio))
            elif width is None and height:
                dim = (height, (height * ratio))
            elif width and height is None:
                dim = ((width / ratio), width)
            else:
                if (width / height) == ratio:
                    dim = (height, width)
                else:
                    print("WARNING: Window resolution (" + str(width) + "*" + str(height)
                          + ") does not agree with ratio " + str(ratio) + ". The default was used.")
        # cv2.resize expects dsize as (width, height); dim is (height, width).
        return cv2.resize(frame, (self.round_up(dim[1]), self.round_up(dim[0])), interpolation=cv2.INTER_AREA)

    @staticmethod
    def round_up(num):
        # Ceiling of a positive number via the double-negation floor trick.
        return int(-(-num // 1))
if __name__ == '__main__':
    # Smoke test: display the default camera until 'q' is pressed.
    cap = VideoStream().start()
    while cap.isOpened():
        ret, frame = cap.read()
        # Guard against a failed grab: cv2.imshow raises on a None frame.
        if ret and frame is not None:
            cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()

'''
To add:
1- Native fps support
2- Using matplotlib as default?
'''
| gpl-3.0 |
# Toy training data: three "porco" (pig) and three "cachorro" (dog) vectors.
porco1 = [1, 1, 0]
porco2 = [1, 1, 0]
porco3 = [1, 1, 0]
cachorro4 = [1, 1, 1]
cachorro5 = [0, 1, 1]
cachorro6 = [0, 1, 1]
dados = [porco1, porco2, porco3, cachorro4, cachorro5, cachorro6]
# Labels: 1 = pig, -1 = dog.
marcacoes = [1, 1, 1, -1, -1, -1]

import numpy as np
import os.path

# Skip the header line; the context manager closes the handle instead of
# leaking it as the previous bare open() did.
with open("filename.txt") as f:
    f.readline()  # skip the header

# np.loadtxt does not accept ``skip_header`` (that is np.genfromtxt's
# parameter set), and numpy does not expand '~' in paths by itself.
# The structured dtype already carries the field names, so a separate
# ``names`` argument is unnecessary.
train = np.genfromtxt(
    os.path.expanduser('~/NetBeansProjects/extractor/generated/avro/259/1281/256/train.csv'),
    skip_header=1, delimiter=';',
    dtype=[('file1Id', '<i4'), ('file1', '|S1024'), ('file2Id', '<i4'),
           ('file2', '|S1024'), ('id', '<i4'), ('issueId', '<i4'),
           ('commitId', '<i4'), ('issueKey', '|S32'), ('issueType', '|S32'),
           ('issuePriority', '|S32'), ('issueAssignedTo', '|S128'),
           ('issueSubmittedBy', '|S128'), ('commenters', '<i4'),
           ('devCommenters', '<i4'), ('issueAge', '<i4'),
           ('wordinessBody', '<i4'), ('wordinessComments', '<i4'),
           ('comments', '<i4'), ('networkId', '<i4'),
           ('networkIssueId', '<i4'), ('networkCommitId', '<i4'),
           ('btwMdn', '<f8'), ('clsMdn', '<f8'), ('dgrMdn', '<f8'),
           ('efficiencyMdn', '<f8'), ('efvSizeMdn', '<f8'),
           ('constraintMdn', '<f8'), ('hierarchyMdn', '<f8'),
           ('size', '<i4'), ('ties', '<i4'), ('density', '<f8'),
           ('diameter', '<i4'), ('commitMetricId', '<i4'),
           ('commitMetricCommitId', '<i4'), ('revision', '|S128'),
           ('committer', '|S128'), ('fileMetricId', '<i4'),
           ('fileId', '<i4'), ('committers', '<i4'), ('commits', '<i4'),
           ('fileAge', '<i4'), ('addedLines', '<i4'),
           ('deletedLines', '<i4'), ('changedLines', '<i4'),
           ('cochanged', '<i4')])
print(train)

#from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier

#modelo = MultinomialNB()
modelo = RandomForestClassifier(n_estimators=10)
modelo.fit(dados, marcacoes)

# Three unlabeled vectors to classify, plus the expected answers.
misterioso1 = [1, 1, 1]
misterioso2 = [1, 0, 0]
misterioso3 = [0, 0, 1]
teste = [misterioso1, misterioso2, misterioso3]
marcacoes_teste = [-1, 1, -1]

resultado = modelo.predict(teste)
print(resultado)
| apache-2.0 |
vadimtk/chrome4sdp | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 94 | 3083 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
  """Run the NaCl integration test stage, forwarding *args*.

  Returns the exit status of buildbot_chrome_nacl_stage.py so the caller
  can propagate it.
  """
  # Integration bots have 'nacl-chrome' in their working directory path.
  pwd = os.environ.get('PWD', '')
  is_integration_bot = 'nacl-chrome' in pwd
  # This environment variable check mimics what
  # buildbot_chrome_nacl_stage.py does.
  is_win64 = (sys.platform in ('win32', 'cygwin') and
              ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
               '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))

  # On the main Chrome waterfall, we may need to control where the tests are
  # run.
  # If there is serious skew in the PPAPI interface that causes all of
  # the NaCl integration tests to fail, you can uncomment the
  # following block.  (Make sure you comment it out when the issues
  # are resolved.)  *However*, it is much preferred to add tests to
  # the 'tests_to_disable' list below.
  #if not is_integration_bot:
  #  return

  tests_to_disable = []

  # In general, you should disable tests inside this conditional.  This turns
  # them off on the main Chrome waterfall, but not on NaCl's integration bots.
  # This makes it easier to see when things have been fixed NaCl side.
  # NOTE(review): indentation was reconstructed; the platform-specific skips
  # below are assumed to be waterfall-only — verify against upstream.
  if not is_integration_bot:
    # http://code.google.com/p/nativeclient/issues/detail?id=2511
    tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')

    if sys.platform == 'darwin':
      # TODO(mseaborn) fix
      # http://code.google.com/p/nativeclient/issues/detail?id=1835
      tests_to_disable.append('run_ppapi_crash_browser_test')

    if sys.platform in ('win32', 'cygwin'):
      # This one is only failing for nacl_glibc on x64 Windows but it is not
      # clear how to disable only that limited case.
      # See http://crbug.com/132395
      tests_to_disable.append('run_inbrowser_test_runner')

  # run_breakpad_browser_process_crash_test is flaky.
  # See http://crbug.com/317890
  tests_to_disable.append('run_breakpad_browser_process_crash_test')
  # See http://crbug.com/332301
  tests_to_disable.append('run_breakpad_crash_in_syscall_test')

  # It appears that crash_service.exe is not being reliably built by
  # default in the CQ. See: http://crbug.com/380880
  tests_to_disable.append('run_breakpad_untrusted_crash_test')
  tests_to_disable.append('run_breakpad_trusted_crash_in_startup_test')

  # Build and run the actual staging command with the disabled-test list.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  nacl_integration_script = os.path.join(script_dir,
                                         'buildbot_chrome_nacl_stage.py')
  cmd = [sys.executable,
         nacl_integration_script,
         # TODO(ncbray) re-enable.
         # https://code.google.com/p/chromium/issues/detail?id=133568
         '--disable_glibc',
         '--disable_tests=%s' % ','.join(tests_to_disable)]
  cmd += args
  sys.stdout.write('Running %s\n' % ' '.join(cmd))
  sys.stdout.flush()
  return subprocess.call(cmd)


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
lewisc/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_connected_test.py | 11 | 2429 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test connected_components graphx, Valuesare checked against networkx"""
import unittest
from sparktkregtests.lib import sparktk_test
class ConnectedComponents(sparktk_test.SparkTKTestCase):
    """Regression test for graphx connected components."""

    def test_connected_component(self):
        """ Tests the graphx connected components in ATK"""
        # NOTE(review): setup happens inside the test; the parent's setUp is
        # invoked explicitly here rather than through an override.
        super(ConnectedComponents, self).setUp()
        graph_data = self.get_file("clique_10.csv")
        schema = [('src', str),
                  ('dst', str)]

        # set up the vertex frame, which is the union of the src and
        # the dst columns of the edges
        self.frame = self.context.frame.import_csv(graph_data, schema=schema)
        self.vertices = self.frame.copy()
        self.vertices2 = self.frame.copy()
        self.vertices.rename_columns({"src": "id"})
        self.vertices.drop_columns(["dst"])
        self.vertices2.rename_columns({"dst": "id"})
        self.vertices2.drop_columns(["src"])
        self.vertices.append(self.vertices2)
        self.vertices.drop_duplicates()
        self.vertices.sort("id")

        # Constant edge weight column required by the graph constructor.
        self.frame.add_columns(lambda x: 2, ("value", int))
        self.graph = self.context.graph.create(self.vertices, self.frame)

        components = self.graph.connected_components()
        components.sort('id')
        # Vertex ids look like '<clique>_<element>'; extract the clique part.
        components.add_columns(
            lambda x: x['id'].split('_')[1], ("element", str))
        frame = components.to_pandas(components.count())
        group = frame.groupby('component').agg(lambda x: x.nunique())

        # Each component should only have 1 element value, the name of the
        # component
        for _, row in group.iterrows():
            self.assertEqual(row['element'], 1)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
eramirem/astroML | book_figures/chapter5/fig_poisson_likelihood.py | 3 | 4970 | """
Binned Regression: Poisson vs Gaussian
--------------------------------------
Figure 5.15
The left panels show data sets with 50 points, binned in 5 bins (upper panels)
and 40 bins (lower panels). The curves show the input distribution (solid), the
Poisson solution (dashed), and the Gaussian solution (dotted). The right panels
show 1-sigma, 2-sigma, and 3-sigma likelihood contours for eqs. 5.91 (dark
lines) and 5.90 (light lines). With 5 bins (top row) there are enough counts
in each bin so that the Gaussian and Poisson predictions are very similar. As
the number of bins is increased, the counts decrease and the Gaussian
approximation becomes biased.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats, interpolate
from astroML.stats.random import linear
from astroML.plotting.mcmc import convert_to_stdev
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def logL_gaussian(xi, yi, a, b):
    """Gaussian log-likelihood of binned counts (Eq. 5.87).

    ``a`` and ``b`` may be parameter grids: a trailing axis is appended so
    the model prediction broadcasts against the flattened data, and the
    sum runs over that data axis.
    """
    x_flat = xi.ravel()
    y_flat = yi.ravel()
    slope = a[..., np.newaxis]
    intercept = b[..., np.newaxis]
    model = slope * x_flat + intercept
    residual = (y_flat - model) ** 2 / model
    return -0.5 * (np.log(model) + residual).sum(axis=-1)
def logL_poisson(xi, yi, a, b):
    """Poisson log-likelihood of binned counts (Eq. 5.88).

    Same broadcasting convention as ``logL_gaussian``: a trailing axis is
    appended to the parameter grids and the sum is taken over the data axis.
    """
    x_flat = xi.ravel()
    y_flat = yi.ravel()
    model = a[..., np.newaxis] * x_flat + b[..., np.newaxis]
    return (y_flat * np.log(model) - model).sum(axis=-1)
#------------------------------------------------------------
# Draw points from distribution
np.random.seed(0)

N = 50
a_true = 0.01
xmin = 0.0
xmax = 10.0
# b_true is chosen so the linear density integrates to 1 over [xmin, xmax].
b_true = 1. / (xmax - xmin) - 0.5 * a_true * (xmax + xmin)
lin_dist = linear(xmin, xmax, a_true)
data = lin_dist.rvs(N)

#------------------------------------------------------------
# Compute and plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.3,
                    bottom=0.1, top=0.95, hspace=0.2)

a = np.linspace(0.00001, 0.04, 71)
b = np.linspace(0.00001, 0.15, 71)

for num, nbins in enumerate([5, 40]):
    # divide points into bins
    yi, bins = np.histogram(data, bins=np.linspace(xmin, xmax, nbins + 1))
    xi = 0.5 * (bins[:-1] + bins[1:])

    # compute likelihoods for Poisson and Gaussian models
    # factor converts density-space parameters into expected counts per bin.
    factor = N * (xmax - xmin) * 1. / nbins
    LP = logL_poisson(xi, yi, factor * a, factor * b[:, None])
    LG = logL_gaussian(xi, yi, factor * a, factor * b[:, None])
    LP -= np.max(LP)
    LG -= np.max(LG)

    # find maximum likelihood point
    i, j = np.where(LP == np.max(LP))
    aP, bP = a[j[0]], b[i[0]]

    i, j = np.where(LG == np.max(LG))
    aG, bG = a[j[0]], b[i[0]]

    # plot scatter and lines
    ax = fig.add_subplot(2, 2, 1 + 2 * num)
    plt.scatter(xi, yi, s=9, c='gray', lw=0)

    x = np.linspace(xmin - 1, xmax + 1, 1000)
    # Solid: true input; dashed: Poisson fit; dash-dot: Gaussian fit.
    for (ai, bi, s) in [(a_true, b_true, '-k'),
                        (aP, bP, '--k'),
                        (aG, bG, '-.k')]:
        px = ai * x + bi
        px[x < xmin] = 0
        px[x > xmax] = 0
        ax.plot(x, factor * px, s)

    ax.set_xlim(xmin - 1, xmax + 1)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y_i$')

    ax.text(0.04, 0.96,
            r'$\rm %i\ points$' % N + '\n' + r'$\rm %i\ bins$' % nbins,
            ha='left', va='top', transform=ax.transAxes)

    # plot likelihood contours
    ax = fig.add_subplot(2, 2, 2 + 2 * num)
    ax.contour(a, b, convert_to_stdev(LP),
               levels=(0.683, 0.955, 0.997),
               colors='k', linewidths=2)
    # NOTE(review): the documented contour keyword is the plural
    # 'linestyles'; 'linestyle' may be silently ignored — confirm.
    ax.contour(a, b, convert_to_stdev(LG),
               levels=(0.683, 0.955, 0.997),
               colors='gray', linewidths=1, linestyle='dashed')

    # trick the legend command
    ax.plot([0], [0], '-k', lw=2, label='Poisson Likelihood')
    ax.plot([0], [0], '-', c='gray', lw=1, label='Gaussian Likelihood')
    ax.legend(loc=1)

    # plot horizontal and vertical lines
    # in newer matplotlib versions, use ax.vlines() and ax.hlines()
    ax.plot([a_true, a_true], [0, 0.2], ':k', lw=1)
    ax.plot([0, 0.06], [b_true, b_true], ':k', lw=1)

    ax.set_xlabel(r'$a^\ast$')
    ax.set_ylabel(r'$b^\ast$')

    ax.set_xlim(0, 0.04)
    ax.set_ylim(0.001, 0.15)

    ax.xaxis.set_major_locator(plt.MultipleLocator(0.02))

plt.show()
| bsd-2-clause |
atantet/QG4 | plot/plotFP.py | 1 | 5774 | import numpy as np
import matplotlib.pyplot as plt
import pylibconfig2
# Font sizes for LaTeX labels and tick labels.
fs_latex = 'xx-large'
fs_xticklabels = 'large'
fs_yticklabels = fs_xticklabels

configFile = '../cfg/QG4.cfg'
cfg = pylibconfig2.Config()
cfg.read_file(configFile)

dim = cfg.model.dim
L = cfg.simulation.LCut + cfg.simulation.spinup
printStepNum = int(cfg.simulation.printStep / cfg.simulation.dt + 0.1)
caseName = cfg.model.caseName
fileFormat = cfg.general.fileFormat

# Build a "_d<delay>" suffix for each configured delay, if any.
delayName = ""
if (hasattr(cfg.model, 'delaysDays')):
    for d in np.arange(len(cfg.model.delaysDays)):
        delayName = "%s_d%d" % (delayName, cfg.model.delaysDays[d])

# List of continuations to plot
initContRng = [[0., 0., 0., 0., 0.],
               [-0.1, 1.4, 0.43, -1.4, 0.3],
               [-0.1, 1.4, 0.43, -1.4, 0.3]]
contStepRng = [0.001, 0.001, -0.001]
nCont = len(initContRng)

srcPostfix = "_%s%s" % (caseName, delayName)
resDir = '../results/'
contDir = '%s/continuation' % resDir
plotDir = '%s/plot/' % resDir

# Prepare plot
# One diagram panel plus a (real, imaginary) pair of panels per branch,
# stacked vertically via the 3-digit subplot code.
fig = plt.figure(figsize=(8, 10))
ax = []
nPan = 100*(1+2*nCont) + 10 + 1
ax.append(fig.add_subplot(nPan))
for k in np.arange(nCont):
    nPan += 1
    ax.append(fig.add_subplot(nPan))
    nPan += 1
    ax.append(fig.add_subplot(nPan))

# Accumulators for the fixed points, eigenvalues and parameter ranges.
fpL = []
eigL = []
contL = []
for k in np.arange(nCont):
initCont = initContRng[k]
contStep = contStepRng[k]
contAbs = sqrt(contStep*contStep)
sign = contStep / contAbs
exp = np.log10(contAbs)
mantis = sign * np.exp(np.log(contAbs) / exp)
dstPostfix = "%s_sigma%04d_sigmaStep%de%d" \
% (srcPostfix, int(initCont[dim] * 1000 + 0.1), int(mantis*1.01),
(int(exp*1.01)))
fpFileName = '%s/fpCont%s.%s' % (contDir, dstPostfix, fileFormat)
eigFileName = '%s/fpEigValCont%s.%s' % (contDir, dstPostfix, fileFormat)
# Read fixed point and cont
if (fileFormat == 'bin'):
# Read fixed point and cont
state = np.fromfile(fpFileName)
# Read FloquetExpenvalues
eig = np.fromfile(eigFileName)
else:
# Read fixed point and cont
state = np.loadtxt(fpFileName)
# Read eigenvalues
eig = np.loadtxt(eigFileName)
state = state[:(state.shape[0]/(dim+1)*(dim+1))]
state = state.reshape(-1, dim+1)
fp = state[:, :dim]
contRng = state[:, dim]
eig = eig[:(eig.shape[0]/2*2)]
eig = eig.reshape(-1, 2)
eig = (eig[:, 0] + 1j * eig[:, 1])
eig = eig[:(eig.shape[0]/dim*dim)]
eig = eig.reshape(-1, dim)
# Bound
isig = contRng < 1.
contRng = contRng[isig]
fp = fp[isig]
eig = eig[isig]
fpL.append(fp)
eigL.append(eig)
contL.append(contRng)
isStable = np.max(eig.real, 1) < 0
A13 = fp[:, 0] + fp[:, 2]
change = np.nonzero(~isStable)[0][0]
print 'Change of stability at cont = ', contRng[change]
print 'Fixed point at change of instability: ', fp[change]
print 'Characteristic exponents at instability: ', eig[change]
# # Save branches
# np.savetxt('%s/continuation/contBranch%d.txt' % (plotDir, k), contRng)
# np.savetxt('%s/continuation/fpBranch%d.txt' % (plotDir, k), fp)
# np.savetxt('%s/continuation/eigRealBranch%d.txt' % (plotDir, k), eig.real)
# np.savetxt('%s/continuation/eigImagBranch%d.txt' % (plotDir, k), eig.imag)
# Plot diagram
ax[0].plot(contRng[isStable], A13[isStable], '-k', linewidth=2)
ax[0].plot(contRng[~isStable], A13[~isStable], '--k', linewidth=2)
# Plot real parts
ax[1+2*k].plot(contRng, np.zeros((contRng.shape[0],)), '--k')
ax[1+2*k].plot(contRng, eig.real, linewidth=2)
ax[1+2*k].set_ylabel(r'$\Re(\lambda_i)$', fontsize=fs_latex)
plt.setp(ax[1+2*k].get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax[1+2*k].get_yticklabels(), fontsize=fs_yticklabels)
ax[1+2*k].set_xlim(0., 1.)
# Plot imaginary parts
ax[1+2*k+1].plot(contRng, eig.imag, linewidth=2)
ax[1+2*k+1].set_ylabel(r'$\Im(\lambda_i)$', fontsize=fs_latex)
plt.setp(ax[1+2*k+1].get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax[1+2*k+1].get_yticklabels(), fontsize=fs_yticklabels)
ax[1+2*k+1].set_xlim(0., 1.)
ax[0].set_ylabel(r'$A_1 + A_3$', fontsize=fs_latex)
ax[0].set_xlim(0., 1.)
plt.setp(ax[0].get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax[0].get_yticklabels(), fontsize=fs_yticklabels)
ax[-1].set_xlabel(r'$\sigma$', fontsize=fs_latex)
plt.savefig('%s/continuation/fpCont%s.eps' % (plotDir, dstPostfix),
dpi=300, bbox_inches='tight')
# Second figure: re-read branch k=1 and plot the sum of its first two
# eigenvalues along the continuation.
plt.figure()
k = 1
initCont = initContRng[k]
contStep = contStepRng[k]

# np.sqrt is required: bare ``sqrt`` is not imported in this module and
# raised NameError.  This is |contStep|.
contAbs = np.sqrt(contStep*contStep)
sign = contStep / contAbs
exp = np.log10(contAbs)
mantis = sign * np.exp(np.log(contAbs) / exp)
dstPostfix = "%s_sigma%04d_sigmaStep%de%d" \
             % (srcPostfix, int(initCont[dim] * 1000 + 0.1), int(mantis*1.01),
                (int(exp*1.01)))
fpFileName = '%s/fpCont%s.%s' % (contDir, dstPostfix, fileFormat)
eigFileName = '%s/fpEigValCont%s.%s' % (contDir, dstPostfix, fileFormat)

# Read fixed point and continuation parameter.
if (fileFormat == 'bin'):
    state = np.fromfile(fpFileName)
    # Read eigenvalues
    eig = np.fromfile(eigFileName)
else:
    state = np.loadtxt(fpFileName)
    # Read eigenvalues
    eig = np.loadtxt(eigFileName)
# Truncate to whole records (explicit floor division, Py2/Py3 safe).
state = state[:(state.shape[0]//(dim+1)*(dim+1))]
state = state.reshape(-1, dim+1)
fp = state[:, :dim]
contRng = state[:, dim]
eig = eig[:(eig.shape[0]//2*2)]
eig = eig.reshape(-1, 2)
eig = (eig[:, 0] + 1j * eig[:, 1])
eig = eig[:(eig.shape[0]//dim*dim)]
eig = eig.reshape(-1, dim)

# Keep only the part of the branch with parameter below 1.
isig = contRng < 1.
contRng = contRng[isig]
fp = fp[isig]
eig = eig[isig]

fpL.append(fp)
eigL.append(eig)
contL.append(contRng)

plt.plot(contRng, eig[:, 0] + eig[:, 1])
| gpl-3.0 |
anurag313/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """Two noisy lines of slope ~1; the first class is offset by ~100 in y.

    Returns (X, y): X has shape (2000, 2); y holds 1000 zeros followed by
    1000 ones, labelling the two lines.
    """
    # Draw the four noise vectors in the same order as before so the RNG
    # stream is consumed identically.
    x_offset = _generate_vector()
    y_offset = _generate_vector() + 100
    x_plain = _generate_vector()
    y_plain = _generate_vector()

    class_a = np.vstack((x_offset, y_offset)).T
    class_b = np.vstack((x_plain, y_plain)).T
    features = np.vstack((class_a, class_b))
    labels = np.hstack((np.zeros(1000), np.ones(1000)))
    return features, labels
def all_but_first_column(X):
    """Select every column of *X* except the first.

    Used after PCA to discard the leading principal component.
    """
    remaining = X[:, 1:]
    return remaining
def drop_first_component(X, y):
    """Fit PCA plus the column-dropper on a train split, transform the test split.

    Returns the transformed test features together with their labels.
    """
    selector = FunctionTransformer(all_but_first_column)
    pipeline = make_pipeline(PCA(), selector)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    transformed = pipeline.transform(X_test)
    return transformed, y_test
if __name__ == '__main__':
    # Show the raw two-line dataset, colored by class.
    X, y = generate_dataset()
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
    plt.show()
    # After dropping the first principal component the classes separate
    # along a single remaining axis.
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        s=50,
    )
    plt.show()
| bsd-3-clause |
nwillemse/nctrader | examples/rsi2.py | 1 | 8242 | #!/usr/bin/env python
"""
RSI2.py
Standard trading system trading the 2-period RSI on SPY
Created on Mon Aug 15 19:20:57 2016
@author: nwillemse
"""
import click
import csv
import pandas as pd
import numpy as np
from datetime import datetime
from os import path, remove
from collections import OrderedDict
from nctrader import settings
from nctrader.compat import queue
from nctrader.price_parser import PriceParser
from nctrader.price_handler.sqlite_bar import SqliteBarPriceHandler
from nctrader.strategy.base import AbstractStrategy
from nctrader.position_sizer.fixed import FixedPositionSizer
from nctrader.risk_manager.example import ExampleRiskManager
from nctrader.portfolio_handler import PortfolioHandler
from nctrader.compliance.example import ExampleCompliance
from nctrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler
from nctrader.statistics.basic import BasicStatistics
from nctrader.statistics.tearsheet import TearsheetStatistics
from nctrader.trading_session.backtest import Backtest
from nctrader.event import (SignalEvent, EventType)
def RSI(series, period=2):
    """Wilder's Relative Strength Index of ``series``; returns the last value.

    Up and down moves are separated, seeded with their simple mean over the
    first ``period`` deltas, then smoothed with Wilder's exponential mean
    (com=period-1, adjust=False).

    Parameters
    ----------
    series : pandas.Series of prices.
    period : int, look-back length (default 2).

    Returns
    -------
    float in [0, 100]; 100 when there are no down moves.
    """
    delta = series.diff().dropna()
    u = delta * 0  # up moves
    d = u.copy()   # down moves
    u[delta > 0] = delta[delta > 0]
    d[delta < 0] = -delta[delta < 0]
    # Seed Wilder's smoothing with the simple average of the first window.
    u[u.index[period-1]] = np.mean(u[:period])  # first value is avg of gains
    u = u.drop(u.index[:(period-1)])
    d[d.index[period-1]] = np.mean(d[:period])  # first value is avg of losses
    d = d.drop(d.index[:(period-1)])
    rs = u.ewm(com=period-1, adjust=False).mean() / \
        d.ewm(com=period-1, adjust=False).mean()
    rsi = 100 - 100 / (1 + rs)
    # .iloc is required: the Series keeps its original integer labels, so
    # ``rsi[-1]`` would attempt a label lookup of -1 and raise KeyError on
    # modern pandas instead of returning the last element.
    return rsi.iloc[-1]
class RSI2Strategy(AbstractStrategy):
    """
    Requires:
    tickers - The list of ticker symbols
    events_queue - A handle to the system events queue
    start_dt - Trading start datetime
    end_dt - Trading end datetime
    """
    def __init__(
        self, config, tickers, events_queue
    ):
        self.config = config
        self.tickers = tickers
        self.events_queue = events_queue
        # Rolling OHLC history, one row per received bar, indexed by time.
        self.bars = pd.DataFrame(columns=['open', 'high', 'low', 'close'])
        self.sma_length = 200
        # Current market state: 'OUT', 'LE' (long entry) or 'SE' (short entry).
        self.position = 'OUT'
        self.explore_fname = self._get_explore_filename()
        # Write the CSV header once, on the first record_explore call.
        self.explore_header = True

    def _get_explore_filename(self):
        """Build today's explore CSV path, removing any stale file first."""
        today = datetime.utcnow().date()
        csv_filename = "explore_" + today.strftime("%Y-%m-%d") + ".csv"
        fname = path.expanduser(path.join(self.config.OUTPUT_DIR, csv_filename))
        try:
            remove(fname)
        except (IOError, OSError):
            print("No exlore files to clean.")
        return fname

    def record_explore(self, info):
        """Append one diagnostic row (an OrderedDict) to the explore CSV."""
        # Write row
        with open(self.explore_fname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=info.keys())
            if self.explore_header:
                writer.writeheader()
                self.explore_header = False
            writer.writerow(info)

    def calc_trend(self, prices):
        """Return 1 when the SMA rose on each of the last three bars, else -1.

        NOTE(review): with fewer than sma_length+3 bars the slices simply
        average whatever history exists — confirm this is intended.
        """
        sma0 = np.mean(prices[-self.sma_length:]['close'])
        sma1 = np.mean(prices[-(self.sma_length+1):-1]['close'])
        sma2 = np.mean(prices[-(self.sma_length+2):-2]['close'])
        sma3 = np.mean(prices[-(self.sma_length+3):-3]['close'])
        if sma0 > sma1 and sma1 > sma2 and sma2 > sma3:
            return 1
        else:
            return -1

    def calculate_signals(self, event):
        # React only to bar events for the single traded ticker.
        ticker = self.tickers[0]
        if event.type == EventType.BAR and event.ticker == ticker:
            # Diagnostic row for the explore CSV; trend/rsi filled in below
            # once enough history is available.
            d = OrderedDict()
            d['timestamp'] = event.time
            d['sig_ticker'] = ticker
            d['sig_close'] = PriceParser.display(event.close_price)
            d['trend'] = None
            d['rsi'] = None
            self.bars.loc[event.time] = (event.open_price, event.high_price,
                                         event.low_price, event.close_price)
            # Enough bars are present for trading
            if len(self.bars) > 3:
                trend = self.calc_trend(self.bars)
                rsi = RSI(self.bars['close'])
                d['trend'] = trend
                d['rsi'] = rsi
                # print ("Date: %s Ticker: %s Trend: %s RSI: %0.4f" % (
                #     event.time, ticker, trend, rsi)
                # )
                # Process Exit Signals: leave a long when RSI crosses above
                # 50, leave a short when it crosses below.
                if self.position == 'LE' and rsi > 50:
                    signal = SignalEvent(ticker, "SLD")
                    self.events_queue.put(signal)
                    self.position = 'OUT'
                    print "%s Signal:LX %s trend:%s rsi:%0.4f" % (
                        event.time, ticker, trend, rsi)
                if self.position == 'SE' and rsi < 50:
                    signal = SignalEvent(ticker, "BOT")
                    self.events_queue.put(signal)
                    self.position = 'OUT'
                    print "%s Signal:SX %s trend:%s rsi:%0.4f" % (
                        event.time, ticker, trend, rsi)
                # Entry Signals
                if self.position == 'OUT':
                    # LE
                    if rsi < 50:
                        signal = SignalEvent(ticker, "BOT")
                        self.events_queue.put(signal)
                        self.position = 'LE'
                        print "%s Signal:LE %s trend:%s rsi:%0.4f" % (
                            event.time, ticker, trend, rsi)
                    # SE
                    if rsi > 50:
                        signal = SignalEvent(ticker, "SLD")
                        self.events_queue.put(signal)
                        self.position = 'SE'
                        print "%s Signal:SE %s trend:%s rsi:%0.4f" % (
                            event.time, ticker, trend, rsi)
            # Write explore
            d['position'] = self.position
            self.record_explore(d)
def run(config, testing, tickers):
    """Wire up every backtest component and run the RSI(2) simulation.

    Returns the results produced by Backtest.simulate_trading.
    """
    # Benchmark ticker
    benchmark = None
    bt_start_dt = datetime(2000, 4, 1)
    trd_start_dt = datetime(2000, 4, 1)
    end_dt = datetime(2000, 12, 31)
    #start_dt = datetime(2016, 1, 1)
    #end_dt = datetime(2016, 8, 1)

    # Set up variables needed for backtest
    title = [
        'RSI2',
        path.basename(__file__),
        ','.join(tickers)
    ]
    events_queue = queue.Queue()
    sqlite_db = config.SQLITE_DB
    initial_equity = PriceParser.parse(100000.00)

    # Use Sqlite Daily Price Handler
    price_handler = SqliteBarPriceHandler(
        sqlite_db, events_queue, tickers
    )

    # Use the RSI Strategy
    strategy = RSI2Strategy(config, tickers, events_queue)

    # Use fixed Position Sizer,
    position_sizer = FixedPositionSizer()

    # Use an example Risk Manager,
    risk_manager = ExampleRiskManager()

    # Use the default Portfolio Handler
    portfolio_handler = PortfolioHandler(
        initial_equity, events_queue, price_handler,
        position_sizer, risk_manager
    )

    # Use the ExampleCompliance component
    compliance = ExampleCompliance(config)

    # Use a simulated IB Execution Handler
    execution_handler = IBSimulatedExecutionHandler(
        events_queue, price_handler, compliance
    )

    # Use the default Statistics
    # statistics = BasicStatistics(
    #     config, portfolio_handler
    # )
    statistics = TearsheetStatistics(
        config, portfolio_handler, title, benchmark, trd_start_dt, end_dt
    )

    # Set up the backtest
    backtest = Backtest(
        price_handler, strategy, portfolio_handler, execution_handler,
        position_sizer, risk_manager, statistics, initial_equity,
        bt_start_dt, end_dt
    )
    results = backtest.simulate_trading(testing=testing)
    return results
@click.command()
@click.option('--config', '-c', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
@click.option('--tickers', '-t', default='SPY', help='Tickers (use comma)')
@click.option('--testing/--no-testing', default=False, help='Enable testing mode')
def main(config, tickers, testing):
    # CLI entry point: split the comma-separated ticker list, load settings
    # from file, then run the backtest.
    tickers = tickers.split(",")
    config = settings.from_file(config, testing)
    run(config, testing, tickers)
| mit |
IshankGulati/scikit-learn | sklearn/externals/joblib/__init__.py | 23 | 5101 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
  * **Avoid computing twice the same thing**: code is rerun over and
    over, for instance when prototyping computation-heavy jobs (as in
    scientific development), but hand-crafted solutions to alleviate this
    issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.10.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| bsd-3-clause |
google/flax | setup.py | 1 | 2665 | # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for Flax."""
import os
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, "README.md"), encoding='utf-8').read()
except IOError:
README = ""
install_requires = [
"numpy>=1.12",
"jax>=0.2.13",
"matplotlib", # only needed for tensorboard export
"dataclasses;python_version<'3.7'", # will only install on py3.6
"msgpack",
"optax",
]
tests_require = [
"atari-py==0.2.5", # Last version does not have the ROMs we test on pre-packaged
"clu", # All examples.
"gym",
"jaxlib",
"ml-collections",
"opencv-python",
"pytest",
"pytest-cov",
"pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
"pytype==2021.5.25", # pytype 2021.6.17 complains on recurrent.py, need to investigate!
"sentencepiece", # WMT example.
"svn",
"tensorflow-cpu>=2.4.0",
"tensorflow_text>=2.4.0", # WMT example.
"tensorflow_datasets",
"tensorflow==2.4.1", # TODO(marcvanzee): Remove once #1326 is fixed.
]
__version__ = None
with open('flax/version.py') as f:
exec(f.read(), globals())
setup(
name="flax",
version=__version__,
description="Flax: A neural network library for JAX designed for flexibility",
long_description="\n\n".join([README]),
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="",
author="Flax team",
author_email="flax-dev@google.com",
url="https://github.com/google/flax",
packages=find_packages(),
include_package_data=False,
zip_safe=False,
install_requires=install_requires,
extras_require={
"testing": tests_require,
},
)
| apache-2.0 |
ctogle/dilapidator | src/dilap/core/qtgui.py | 1 | 24408 | #import modular4.base as mb
import dilap.geometry.tools as gtl
import dilap.core.base as mb
import dilap.core.plotting as dtl
import os,sys,numpy,matplotlib,six,multiprocessing
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as figure_canvas
from matplotlib.backend_bases import NavigationToolbar2
import matplotlib.pyplot as plt
from PySide import QtGui,QtCore
import pdb
###############################################################################
### utility functions
###############################################################################
def runapp(windowclass,kws):
    """Build a mapp around *windowclass* and block until its window closes."""
    print(5,'opening window',windowclass)
    application = mapp(windowclass, **kws)
    application.exec_()
    print(5,'closed window',windowclass)
def convert_pixel_space(w,h):
    """Scale (w, h) from a 1920x1080 reference layout to the actual desktop."""
    reference_w, reference_h = 1920.0, 1080.0
    geometry = QtGui.QApplication.desktop().availableGeometry()
    scale_w = geometry.width() / reference_w
    scale_h = geometry.height() / reference_h
    return w * scale_w, h * scale_h
def bound(label,lay):
    """Wrap *lay* in a QGroupBox titled *label*, returned as a layout."""
    box = QtGui.QGroupBox(title = label)
    box.setLayout(lay)
    return layout((box,))
def sortto(l1, l2):
    """Reorder *l2* in place to follow the ordering of *l1*.

    After the call, l2 contains exactly those elements of l1 that were
    already present in l2 (duplicates in l1 are kept), in l1's order;
    elements of l2 that do not appear in l1 are dropped.
    """
    # Compute the result before clearing so membership tests see the
    # original contents of l2.
    kept = [item for item in l1 if item in l2]
    # Clear and refill in place (not rebinding) so callers holding a
    # reference to l2 observe the change.
    del l2[:]
    l2.extend(kept)
###############################################################################
### functions to create layouts/widgets
###############################################################################
def layout(widgets = [],orientation = 'v'):
    """Build a Qt layout holding *widgets*.

    orientation 'g' builds a grid, where each entry of *widgets* is a
    (widget, position_tuple, span_tuple) triple; 'v'/'h' build box
    layouts from plain widgets.
    """
    if orientation == 'g':
        built = QtGui.QGridLayout()
        built.setSpacing(0.1)
        for widget, pos, span in widgets:
            built.addWidget(*((widget,) + pos + span))
    else:
        if orientation == 'v':
            built = QtGui.QVBoxLayout()
        elif orientation == 'h':
            built = QtGui.QHBoxLayout()
        for widget in widgets:
            built.addWidget(widget)
    return built
def splitter(widgets = [],orientation = 'v',boxlabel = 'mwidgetsplitter'):
    """Stack *widgets* in a QSplitter ('v' or 'h'), wrapped via bound()."""
    if orientation == 'v':
        direction = QtCore.Qt.Vertical
    elif orientation == 'h':
        direction = QtCore.Qt.Horizontal
    split = QtGui.QSplitter(direction)
    for widget in widgets:
        split.addWidget(widget)
    return bound(boxlabel, layout((split,)))
def buttons(funcs,events,labels,fw = None,fh = None,
        boxlabel = 'mwidgetbuttons',ori = 'v'):
    """One QPushButton per (func, event, label) triple, wrapped in an mwidget.

    *events* names the signal on each button (e.g. 'clicked') that is
    connected to the matching function; fw/fh fix width/height if given.
    """
    made = []
    for index, func in enumerate(funcs):
        btn = QtGui.QPushButton(labels[index])
        if fw is not None:
            btn.setFixedWidth(fw)
        if fh is not None:
            btn.setFixedHeight(fh)
        getattr(btn, events[index]).connect(func)
        made.append(btn)
    return mwidget(layout(made, ori), boxlabel)
def check(label,initial,callback,boxlabel = 'mwidgetcheck',ori = 'v'):
    """Single check box widget; *callback* receives True/False on toggle."""
    box = QtGui.QCheckBox(label)
    box.setCheckState(QtCore.Qt.Checked if initial else QtCore.Qt.Unchecked)
    box.stateChanged.connect(lambda state: callback(state != 0))
    return mwidget(layout((box,), ori), boxlabel)
def checks(tlist,labels,master = True,callback = None,boxlabel = None,ori = 'v'):
    '''
    create a widget containing a set of check boxes which add/remove items from a list

    tlist is mutated in place as boxes are toggled; labels provides the box
    captions (and the full item universe). When master is True an extra
    'All' box selects/deselects everything. Returns an mwidget when
    boxlabel is a string, otherwise the raw list of QCheckBox objects.
    '''
    qck,quck = QtCore.Qt.CheckState.Checked,QtCore.Qt.CheckState.Unchecked
    # Toggle membership of t in tlist, sync the box and the master state.
    def togg(c,t):
        if not t in tlist:
            tlist.append(t)
            c.setCheckState(qck)
        elif t in tlist:
            tlist.remove(t)
            c.setCheckState(quck)
        # Restore the original item ordering after the mutation.
        sortto(tlisto,tlist)
        # Master box is checked only when every item is selected.
        if tlisto == tlist:m.setCheckState(qck)
        else:m.setCheckState(quck)
    def flipall():
        # Handler factory for the master box: make every box match it.
        def f():
            s = m.checkState()
            for lx in range(len(labels)):
                c,t = cs[lx+1],tlisto[lx]
                if not c.checkState() is s:
                    togg(c,t)
        return f
    def toggle(c,t):
        # Bind one (checkbox, item) pair for its clicked handler.
        def f():togg(c,t)
        return f
    tlisto = tlist[:]  # snapshot of the full ordering
    if labels is tlist:labels = tlist[:]
    cs = [QtGui.QCheckBox(l) for l in labels]
    for c,l in zip(cs,labels):
        c.setCheckState(qck if l in tlist else quck)
        c.clicked.connect(toggle(c,l))
        if callback:c.clicked.connect(callback)
    if master:
        # Optional 'All' box prepended to the list of check boxes.
        m = QtGui.QCheckBox('All')
        m.setCheckState(qck)
        for l in labels:
            if not l in tlist:
                m.setCheckState(quck)
                break
        m.clicked.connect(flipall())
        if callback:m.clicked.connect(callback)
        cs.insert(0,m)
    if type(boxlabel) == type(''):
        return mwidget(layout(cs,ori),boxlabel)
    else:return cs
def selector(labels,initial,callback,boxlabel = 'mwidgetselector'):
    """Drop-down combo box; *callback* receives the selected label string."""
    options = labels[:]
    combo = QtGui.QComboBox()
    for option in options:
        combo.addItem(option)
    def on_change():
        callback(options[combo.currentIndex()])
    combo.currentIndexChanged.connect(on_change)
    combo.setCurrentIndex(labels.index(initial))
    return mwidget(layout((combo,)), boxlabel)
def radios(labels,initial,callback,boxlabel = 'mwidgetradios',ori = 'v'):
    """Radio-button group; *callback* receives the chosen label string."""
    snapshot = labels[:]
    def chooser(index):
        # Bind the index now; the label is looked up when clicked.
        return lambda: callback(snapshot[index])
    btns = [QtGui.QRadioButton(text) for text in labels]
    btns[labels.index(initial)].setChecked(True)
    for index, btn in enumerate(btns):
        btn.clicked.connect(chooser(index))
    return mwidget(layout(btns, ori), boxlabel)
def spin(minv,maxv,init = None,step = None,callback = None,boxlabel = None,ori = 'v'):
    """Double spin box in [minv, maxv]; *callback* receives the new value.

    Returns a wrapped mwidget when *boxlabel* is a string, otherwise the
    bare QDoubleSpinBox.
    """
    widget = QtGui.QDoubleSpinBox()
    widget.setDecimals(10)
    widget.setMinimum(minv)
    widget.setMaximum(maxv)
    if step is not None:
        widget.setSingleStep(step)
    if init is not None:
        widget.setValue(init)
    if callback is not None:
        widget.valueChanged.connect(lambda: callback(widget.value()))
    if type(boxlabel) == str:
        return mwidget(layout((widget,)), boxlabel)
    return widget
def textbox(initial,callback = None,boxlabel = 'mwidgettextbox',ori = 'v'):
    """Single-line edit; *callback* receives the text when editing finishes."""
    field = QtGui.QLineEdit()
    field.setText(str(initial))
    def on_finished():
        callback(str(field.text()))
    field.editingFinished.connect(on_finished)
    return mwidget(layout((field,), ori), boxlabel)
def slider(initial,callback = None,minv = 1,maxv = 10,intv = 1,
        boxlabel = 'mwidgettextbox',ori = 'v'):
    """Tick-marked slider; *callback* receives the value on release."""
    direction = QtCore.Qt.Vertical if ori == 'v' else QtCore.Qt.Horizontal
    bar = QtGui.QSlider(direction)
    bar.setMinimum(minv)
    bar.setMaximum(maxv)
    bar.setSingleStep(intv)
    bar.setTickPosition(bar.TicksBothSides)
    bar.setTickInterval(intv)
    bar.setValue(initial)
    bar.sliderReleased.connect(lambda: callback(bar.value()))
    bar.setGeometry(30, 40, 100, 30)
    return mwidget(layout((bar,), ori), boxlabel)
###############################################################################
### classes useful for making applications
###############################################################################
class mapp(QtGui.QApplication):
    """QApplication wrapper owning a single main window.

    Instantiating a mapp creates the Qt application (styled 'Plastique')
    and immediately constructs the main window; call exec_() on the
    instance to enter the event loop.
    """
    def __init__(self,main_window_class = None,**kws):
        QtGui.QApplication.__init__(self,sys.argv)
        QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique'))
        # Fall back to the generic mwindow when no window class is given.
        if main_window_class is None:main_window_class = mwindow
        self.main_window = main_window_class(**kws)
class mwindow(QtGui.QMainWindow):
    """Base main window: applies title/geometry/icon defaults, then installs
    the layout returned by content() as the central widget and shows itself.
    """
    def _standards(self,**st):
        # Apply window title and geometry from kwargs, with defaults.
        if 'title' in st:wt = st['title']
        else:wt = 'mwindow'
        if 'geometry' in st:geo = st['geometry']
        else:
            # Default placement and size, scaled from a 1920x1080 reference.
            x,y = convert_pixel_space(300,300)
            x_size,y_size = convert_pixel_space(512,512)
            geo = (x,y,x_size,y_size)
        gearicon = QtGui.QIcon(mb.resource_path('gear.png'))
        self.setWindowIcon(gearicon)
        self.setWindowTitle(wt)
        self.setGeometry(*geo)
    def __init__(self,**kws):
        QtGui.QMainWindow.__init__(self)
        self._standards(**kws)
        w = QtGui.QWidget()
        w.setLayout(self.content(**kws))
        self.setCentralWidget(w)
        self.show()
    def content(self,**kws):
        # Subclasses override this to supply the window's layout.
        content = QtGui.QVBoxLayout()
        return content
class mwidget(QtGui.QWidget,mb.base):
    """QWidget/mb.base hybrid that installs a given layout, optionally
    bound in a labelled group box and/or nested inside a scroll area.
    """
    def awidg(self,w):
        # Append a widget to this widget's layout and repaint.
        self.lay.addWidget(w)
        self.update()
    def __init__(self,lay = None,lab = '',scroll = False,**kws):
        QtGui.QWidget.__init__(self)
        if not lay is None:
            if lab:lay = bound(lab,lay)
            if scroll:
                # Nest the layout in a scroll area via an inner mwidget.
                scroll = QtGui.QScrollArea()
                scroll.setBackgroundRole(QtGui.QPalette.Window)
                scroll.setWidget(mwidget(lay))
                self.lay = layout((scroll,))
            else:self.lay = lay
            self.setLayout(self.lay)
###############################################################################
### custom widget classes
###############################################################################
# Module-level matplotlib figure shared by all pltwidget instances.
figure = None
def init_figure():
    """Create the shared module-level figure (call once before pltwidget)."""
    global figure
    figure = plt.figure()
class pltwidget(mwidget):
    """Matplotlib canvas + toolbar widget drawing into the shared figure.

    An optional plot_callback(ax) kwarg repopulates the axes on update().
    """
    def __init__(self,parent,**kws):
        mwidget.__init__(self)
        self.parent = parent
        self.fig = figure  # module-level shared figure (see init_figure)
        self.canvas = figure_canvas(self.fig)
        self.toolbar = plttoolbar(self.canvas)
        self.ax = None
        self._def('plot_callback',None,**kws)
        self.setLayout(layout([self.canvas,self.toolbar],'v'))
        self.setBackgroundRole(QtGui.QPalette.Window)
    def show(self):
        # Redraw before showing; update() returns self for chaining.
        mwidget.show(self.update())
        return self
    def update(self):
        # Clear the axes, rerun the plot callback and repaint the canvas.
        ax = self.clear_ax()
        if self.plot_callback:
            self.plot_callback(ax)
        self.canvas.draw()
        mwidget.update(self)
        return self
    def clear_ax(self,proj = '3d'):
        """Reset and return the figure's axes ('3d' projection by default)."""
        #self.fig.clf()
        #ax = dtl.plot_axes(25,self.fig)
        ax = self.fig.gca(projection = proj)
        x = 25  # half-extent of the default view volume (16:9 in z)
        ax.set_xlim([-x,x])
        ax.set_ylim([-x,x])
        ax.set_zlim([-(9.0/16.0)*x,(9.0/16.0)*x])
        ax.cla()
        ax.grid(False)
        return ax
class plttoolbar(NavigationToolbar2,QtGui.QToolBar):
    """Qt navigation toolbar restricted to the Pan/Zoom/Save tools."""
    message = QtCore.Signal(str)
    # Keep only a subset of the standard matplotlib tool buttons.
    if hasattr(NavigationToolbar2,'toolitems'):
        titems = NavigationToolbar2.toolitems
        toolitems = [t for t in titems if t[0] in ('Pan','Zoom','Save')]
    else:toolitems = []
    def dynamic_update(self):
        # Repaint the canvas (called by the base toolbar during drags).
        self.canvas.draw()
    def pan(self,*ags):
        super(plttoolbar,self).pan(*ags)
        self._update_buttons_checked()
    def zoom(self,*ags):
        super(plttoolbar,self).zoom(*ags)
        self._update_buttons_checked()
    def _update_buttons_checked(self):
        # Sync toggle-button checked state with the active toolbar mode.
        self._actions['pan'].setChecked(self._active == 'PAN')
        self._actions['zoom'].setChecked(self._active == 'ZOOM')
    def _init_toolbar(self):
        # Build the toolbar actions plus a right-aligned location label.
        for text,tooltip_text,image_file,callback in self.toolitems:
            if text is None:self.addSeparator()
            else:
                i = QtGui.QIcon()
                a = self.addAction(i,text,getattr(self,callback))
                self._actions[callback] = a
                if callback in ('zoom','pan'):a.setCheckable(True)
                if tooltip_text is not None:a.setToolTip(tooltip_text)
        self.locLabel = QtGui.QLabel("", self)
        self.locLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
        self.locLabel.setSizePolicy(QtGui.QSizePolicy(
            QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Ignored))
        labelAction = self.addWidget(self.locLabel)
        labelAction.setVisible(True)
    def __init__(self,canvas):
        self.canvas = canvas
        self.img_extensions = 'Image (*.png,*.pdf)'
        self._actions = {}
        QtGui.QToolBar.__init__(self)
        NavigationToolbar2.__init__(self,canvas)
    def draw_rubberband(self,event,x0,y0,x1,y1):
        # Convert matplotlib (origin bottom-left) to Qt (top-left) coords.
        height = self.canvas.figure.bbox.height
        y1 = height - y1
        y0 = height - y0
        w = abs(x1 - x0)
        h = abs(y1 - y0)
        rect = [int(val)for val in (min(x0,x1),min(y0,y1),w,h)]
        self.canvas.drawRectangle(rect)
    def save_figure(self,*ags):
        # Prompt for a filename and write the current figure to it.
        fname = QtGui.QFileDialog.getSaveFileName(self,
            'Choose Filename','aplot.pdf',self.img_extensions)
        if fname:
            try:
                self.canvas.print_figure(six.text_type(fname[0]))
                print(5,'saved figure at',fname)
            except Exception as e:
                QtGui.QMessageBox.critical(
                    self,'error saving file',str(e),
                    QtGui.QMessageBox.Ok,QtGui.QMessageBox.NoButton)
# When True, windows are opened in a separate process via multiprocessing.
usemp = False
def displaycontext(cx,**kws):
    """Show context *cx* in a cwindow, in-process or in a child process.

    Returns the child Process when usemp is True, otherwise None after
    the window has been closed.
    """
    # Bug fix: the multiprocessing branch previously never put cx into
    # kws, so the spawned window received no context at all.
    kws['context'] = cx
    if usemp:
        # NOTE(review): cwindow is not defined in this module — confirm
        # the intended window class is in scope at call time.
        p = multiprocessing.Process(target = runapp,args = (cwindow,kws))
        p.start()
        return p
    else:
        runapp(cwindow,kws)
        return None
'''#
class plttree_book(mwidget):
def set_page(self,pgx):
self.tree_pages[self.page].hide()
self.tree_pages[pgx].show()
self.page = pgx
def _change_page(self,cpg,ppg):
for ix in range(len(self.tree_items)):
if self.tree_items[ix] is cpg:
self.set_page(ix)
return
def _header(self,header):
if not type(header) is type(''):header = ''
self.tree.setHeaderLabel(header)
def _pages(self,pages):
self.tree.setColumnCount(1)
self.tree.clear()
self.aux = {}
self._targets = []
self._targetlabels = []
titems,tpages,tops,bottoms = [],[],[],[]
for x in range(len(pages)):
if not pages[x]:continue
pgd,pgt,pge = pages[x]
if 'header' in pge:h = str(pge['header'])
else:h = ''
toplabel = 'pspace page: %i : %s' % (x,h)
top = QtGui.QTreeWidgetItem(None,[toplabel])
tops.append(top)
main_page = mwidget()
titems.append(top)
tpages.append(main_page)
if len(pgd.shape) == 2:
subs = (((pgd,pgt),'single'),)
elif len(pgd.shape) == 3:
subs = tuple(((pgd[x],pgt),'trajectory: %i' % x) for x in range(pgd.shape[0]))
else:
print(5,'unknown tree widget scenario')
raise ValueError
for subpg,subh in subs:
for t in subpg[1]:
if not t in self._targets:
self._targets.append(t)
self._targetlabels.append(t)
bottom = QtGui.QTreeWidgetItem(top,[subh])
bottoms.append(bottom)
titems.append(bottom)
t1 = subpg[1][0]
if 'extra_trajectory' in pge:
extras = pge['extra_trajectory']
else:extras = None
if 'pspaceaxes' in pge:
self.aux['pspaceaxes'] = pge['pspaceaxes']
self.aux['entry'] = subpg
sub_page = mpltwidget(self,subpg,extras = extras)
tpages.append(sub_page)
self.tree.addTopLevelItem(top)
if self.xdomain is None:self.xdomain = t1
if self.ydomain is None:self.ydomain = t1
if self.zdomain is None:self.zdomain = t1
if self.linestyles is None:self.linestyles = ['-' for t in self._targets]
if self.linewidths is None:self.linewidths = [1 for t in self._targets]
if self.linemarkers is None:self.linemarkers = ['' for t in self._targets]
if self.linecolors is None:
linsp = numpy.linspace(0,0.9,len(self._targets))
self.linecolors = [self.colormap(i) for i in linsp]
for page in tpages:
self.hsplit.addWidget(page)
page.hide()
self.tree_items = titems
self.tree_pages = tpages
self.tree_tops = tops
self.tree_bottoms = bottoms
self.set_page(self.page)
def _axisslice_widgets(self):
def slice_callback(a):
def f(c):
v = float(c)
nearest = [abs(x-v) for x in self.axisvalues[a]]
nearest = nearest.index(min(nearest))
self.axisdefaults[a] = self.axisvalues[a][nearest]
self.update()
return f
axslices = []
for axx in range(len(self.axisnames)):
ls = tuple(str(v) for v in mb.uniq(self.axisvalues[axx]))
si = str(self.axisdefaults[axx])
cb = slice_callback(axx)
axslices.append(selector(ls,si,cb,boxlabel = self.axisnames[axx]))
return mwidget(layout(axslices),'Parameter Space Axes')
def _domain_widgets(self,dom):
i = bool(self.__getattribute__(dom+'log'))
lab = textbox(self.xlabel,self._defbind(dom+'label'),dom+'-label')
sel = selector(self._targets,self._targets[0],
self._defbind(dom+'domain'),dom+'-domain')
lg = check('Use log('+str(dom)+')',i,self._defbind(dom+'log'),'')
i = bool(self.__getattribute__(dom+'bnd'))
usebnd = check('Use Bounds',i,self._defbind(dom+'bnd'),'')
lowbnd = textbox(
str(self.xmin),self._defbind(dom+'min'),
boxlabel = dom+'-minimum',ori = 'v')
highbnd = textbox(
str(self.xmax),self._defbind(dom+'max'),
boxlabel = dom+'-maximum',ori = 'v')
sel = mwidget(layout((
mwidget(layout((sel,lab),'h')),
mwidget(layout((lg,usebnd,lowbnd,highbnd),'h'))),'v'),dom+'-axis')
sel.setFixedWidth(self._panelwidth)
return sel
def _target_widgets(self):
def tnbind(x):
def f(tname):
self._targetlabels.pop(x)
self._targetlabels.insert(x,tname)
self.update()
return f
def lwbind(x):
def f(lw):
self.linewidths[x] = int(lw)
self.update()
return f
def lsbind(x):
def f(ls):
self.linestyles[x] = str(ls)
self.update()
return f
def lmbind(x):
def f(lm):
self.linemarkers[x] = str(lm)
self.update()
return f
def clbind(x):
def f():
col = QtGui.QColorDialog.getColor()
if col.isValid():self.linecolors[x] = col.getRgbF()
self.update()
return f
tcs = checks(self._targets,self._targets,True,self.update,None)
lwidgs = []
for tx in range(len(self._targets)):
tnamebox = textbox(self._targets[tx],tnbind(tx),None)
lws = [str(x) for x in range(10)]
lwsel = selector(lws,str(self.linewidths[tx]),lwbind(tx),None)
lss = ['','-','--','-.',':']
lssel = selector(lss,str(self.linestyles[tx]),lsbind(tx),None)
lms = ['','o','v','^','<','>','s','p','*','h','H','D','d','x','+']
lmsel = selector(lms,str(self.linemarkers[tx]),lmbind(tx),None)
lcbtn = buttons((clbind(tx),),('clicked',),('Col',),30,30,None)
lwidgs.append((tcs[tx+1],(tx+1,0),(1,1)))
lwidgs.append((tnamebox,(tx+1,1),(1,1)))
lwidgs.append((lwsel,(tx+1,2),(1,1)))
lwidgs.append((lssel,(tx+1,3),(1,1)))
lwidgs.append((lmsel,(tx+1,4),(1,1)))
lwidgs.append((lcbtn,(tx+1,5),(1,1)))
lwidgs.insert(0,(tcs[0],(0,0),(1,1)))
sls = mwidget(layout(lwidgs,'g'),'Plot Targets')
return sls
def _domain_target_ptype_widgets(self):
plab = textbox(self.plottitle,self._defbind('plottitle'),'Plot Title')
pleg = check('Show Legend',self.legend,self._defbind('legend'),'')
popt = mwidget(layout((plab,pleg),'v'),'')
plab.setFixedWidth(self._panelwidth)
xaxis = self._domain_widgets('x')
yaxis = self._domain_widgets('y')
zaxis = self._domain_widgets('z')
rds = radios(self.plottypes,self.plottype,self._defbind('plottype'),'Plot Type')
tcs = self._target_widgets()
if self.axisnames:axs = self._axisslice_widgets()
else:axs = mwidget()
bot = mwidget(layout((rds,axs),'h'),'')
return mwidget(splitter((popt,xaxis,yaxis,zaxis,tcs,bot),'v',''),'Plot Filter',True)
def _widgets(self):
self.vsplit = QtGui.QSplitter(QtCore.Qt.Vertical)
self.hsplit = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.tree = QtGui.QTreeWidget()
self.vsplit.addWidget(self.tree)
self.hsplit.addWidget(self.vsplit)
self.tree.currentItemChanged.connect(self._change_page)
self._header(self.header)
self._pages(self.pages)
self._set_axis_info()
updatebutton = buttons((self.update,),('clicked',),('Update Plot',),600,100,'')
self.plt_controls = self._domain_target_ptype_widgets()
self.vsplit.addWidget(updatebutton)
self.vsplit.addWidget(self.plt_controls)
return (self.hsplit,)
def _set_axis_info(self):
if 'pspaceaxes' in self.aux and self.axisdefaults is None:
self.reduce_lines = True
self.axisnames = self.aux['pspaceaxes']
d,t = self.aux['entry']
if not t == self._targets:
print('probably a serious problem!')
pdb.set_trace()
axxs = tuple(t.index(a) for a in self.axisnames)
self.axisvalues = [d[a] for a in axxs]
self.axisdefaults = [vs[0] for vs in self.axisvalues]
def _defbind(self,k):
def f(c):
self.__setattr__(k,c)
print(5,'set pltwidget attribute: %s : %s' % (k,str(c)))
self.update()
return f
def update(self):
self.tree_pages[self.page].update()
mwidget.update(self)
def calc_lines_callback(self,pwidg,ax,d,t,x,ys):
print('calc_lines_callback!')
#ax.plot([500,500],[-1,100],linewidth = 5.0,marker = 'o',color = 'b')
return ax
def calc_color_callback(self,pgwidg,ax,d,t,x,y,z):
print('calc_color_callback!')
return ax
_panelwidth = 500
def __init__(self,**kws):
mwidget.__init__(self,**kws)
self.kws = kws
self._def('line_callbacks',[],**kws)
self._def('pages',[],**kws)
self._def('page',0,**kws)
self._def('header','Data Selection',**kws)
self._def('_targets',[],**kws)
self._def('xdomain',None,**kws)
self._def('ydomain',None,**kws)
self._def('zdomain',None,**kws)
self._def('linestyles',None,**kws)
self._def('linewidths',None,**kws)
self._def('linemarkers',None,**kws)
self._def('linecolors',None,**kws)
self._def('xlabel','',**kws)
self._def('xlabelsize',20,**kws)
self._def('ylabel','',**kws)
self._def('ylabelsize',20,**kws)
self._def('zlabel','',**kws)
self._def('zlabelsize',20,**kws)
self._def('plottitle','',**kws)
self._def('plottitlesize',18,**kws)
self._def('legend',True,**kws)
self._def('xlog',False,**kws)
self._def('ylog',False,**kws)
self._def('zlog',False,**kws)
self._def('xbnd',False,**kws)
self._def('xmin','',**kws)
self._def('xmax','',**kws)
self._def('xticksize',20,**kws)
self._def('ybnd',False,**kws)
self._def('ymin','',**kws)
self._def('ymax','',**kws)
self._def('yticksize',20,**kws)
self._def('zbnd',False,**kws)
self._def('zmin','',**kws)
self._def('zmax','',**kws)
self._def('zlabsize',20,**kws)
self._def('axisnames',[],**kws)
self._def('axisvalues',[],**kws)
self._def('axisdefaults',None,**kws)
self._def('reduce_lines',False,**kws)
self._def('maxlinecount',20,**kws)
self._def('colorplot_interpolation','nearest',**kws)
self._def('colormap',plt.get_cmap('jet'),**kws)
self._def('plottypes',('lines','color'),**kws)
self._def('plottype','lines',**kws)
wgs = self._widgets()
self._layout = layout(wgs,'h')
self.setLayout(self._layout)
for top in self.tree_tops:self.tree.expandItem(top)
'''#
| mit |
Barmaley-exe/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 37 | 2901 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
def test_graph_lasso(random_state=0):
    """graph_lasso: costs decrease, 'cd'/'lars' agree, centering is a no-op."""
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)
    for alpha in (0., .01, .1):
        covs = dict()
        for method in ('cd', 'lars'):
            # Bug fix: pass mode=method. Previously both iterations ran
            # the default solver, so the 'cd' vs 'lars' comparison below
            # was vacuous.
            cov_, _, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                         return_costs=True)
            covs[method] = cov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease (doesn't hold if alpha == 0)
            if not alpha == 0:
                assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results. The two solvers
        # now genuinely differ, so only require agreement to 4 decimals.
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
    # Smoke test the estimator
    model = GraphLasso(alpha=.1).fit(X)
    model.score(X)
    assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
    assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
    # For a centered matrix, assume_centered could be chosen True or False
    # Check that this returns indeed the same result for centered data
    Z = X - X.mean(0)
    precs = list()
    for assume_centered in (False, True):
        prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
        precs.append(prec_)
    assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_cv(random_state=1):
    """Smoke-test GraphLassoCV, including its verbose output path."""
    # Sample data from a sparse multivariate normal
    rng = check_random_state(random_state)
    dim, n_samples = 5, 6
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=rng)
    X = rng.multivariate_normal(np.zeros(dim), linalg.inv(prec),
                                size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=3, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout
    # Smoke test with specified alphas
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
anewmark/galaxy_dark_matter | call2_age_lum.py | 1 | 10505 | import astropy.table as table
import numpy as np
from defcuts import *
from defflags import *
from halflight_first import *
from def_get_mags import *
from def_halflight_math import *
from def_ages import *
ty='mean'  # averaging type passed to get_avg_lums
stax=True  # apply the upper-radius cut when stacking profiles
if stax==False:
	tag=''
else:
	tag='uplim'
# Placeholders for annotation text on the plots.
txtdist= ''
txtslope=''
# Output directories for clump and distribution plots.
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/clumps/2'+ty+tag
doutdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/distribution/2'+ty+tag
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
DATA=table.Table.read(indir+'med_vespa_LOWZ.fits')
bands=['g', 'r', 'i','z', 'y']
# Aperture diameters (arcsec) and the corresponding radii (half of each).
daperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]
aperture=[x*0.5 for x in daperture]
def do_cuts(datatab):
	"""Drop rows with sentinel magnitudes, then apply per-band flag cuts.

	Rows whose 'mag_forced_cmodel' holds a sentinel value in ``ne`` are
	removed for every band, after which each pixel-quality flag in
	``parm`` is applied band by band (the last band, 'y', is skipped
	because its flag columns are absent).
	"""
	parm = ['flags_pixel_saturated_center', 'flags_pixel_edge',
		'flags_pixel_interpolated_center', 'flags_pixel_cr_center',
		'flags_pixel_suspect_center', 'flags_pixel_clipped_any',
		'flags_pixel_bad']
	ne = [99.99, 199.99, 0.0]  # sentinel "no measurement" magnitudes
	cutdata = not_cut(datatab, bands, 'mag_forced_cmodel', ne)
	# Unused locals (mincut, maxcut, bandi) from the original removed.
	for band in bands[:-1]:  # flags not present for the 'y' band
		cutdata = many_flags(cutdata, parm, band)
	return cutdata
DATA=do_cuts(DATA)
def get_agebin_dat(Data, hm):
	"""Split *Data* (VESPA runs 1 and 5 only) by mass-fraction threshold *hm*.

	Returns (above, below): rows passing mass_frac_cut1 at fraction hm
	and the complementary rows.
	"""
	print('Running Age Bin')
	runid=Data['RUNID']
	runIDs, count=np.unique(runid, return_counts=True)
	# Keep only VESPA runs 1 and 5.
	ndata=Data[(runid==5) | (runid==1)]
	runtest=''
	if runtest=='yes':
		# Optional diagnostics: galaxy multiplicities per run.
		test1=Data[runid==1]
		test5=Data[runid==5]
		gal1, count1=np.unique(test1['SPECOBJID'],return_counts=True)
		gal5, count5=np.unique(test5['SPECOBJID'],return_counts=True)
		print('Number of galaxies in run1= ', len(gal1))
		print('Howmany times they repeat: ', count1)
		print('Number of galaxies in run5= ', len(gal5))
		print('Howmany times they repeat: ', count5)
	gal, count=np.unique(ndata['SPECOBJID'],return_counts=True)
	print('Number of galaxies in run==1,5: ', len(gal))
	print('How many times they repeat: ', count)
	# Split by mass fraction; get_opp=True also returns the complement.
	newdata, notdat=mass_frac_cut1(ndata, hm, get_opp=True)
	return newdata, notdat
hh=0.7  # mass-fraction threshold used to split the sample
newdata, datanot= get_agebin_dat(DATA, hh)
starts1=newdata['AGESTART']
starts2=datanot['AGESTART']
# Restrict both subsamples to the age bin starting at log(age)=9.04.
data1=newdata[starts1==9.04]
data2=datanot[starts2==9.04]
def my_halflight2(dat1):
	"""Compute luminosity profiles, half-light radii and profile slopes.

	Returns (ind, means, ind_slope, mean_slopes): per-galaxy quantities,
	the averaged (stacked) profile, and the slope fits between r1/2 and
	4*r1/2 for both.
	"""
	# Per-galaxy log luminosities, radii and luminosity densities.
	loglum, lograd, loglumd= get_ind_lums(dat1, bands, aperture, scale='log')
	if stax==True:
		print('hi')
		# Drop points beyond the upper radius cut before fitting.
		loglum, lograd, loglumd= upper_rad_cut(loglum, lograd, loglumd, 4, proof=False)
	#print('length of radius array is ', len(lograd))
	logr12s, logr412s= get_halflight2(loglum, lograd, 4)
	print('min radius in lin= ', np.min(10**lograd), 'max radius in lin= ', np.max(10**lograd))
	print('min r1/2 is ', np.min(10**logr12s),'max 4r1/2 is ', np.max(10**logr412s))
	# Averaged (stacked) profile over all galaxies.
	mloglum, mlogdens, mlograd, mlogerr= get_avg_lums(loglum, lograd, loglumd, gr=[0.7,71,11], type=ty, scale='lindata')
	logr12s, logr412s= get_halflight2(loglum, lograd, 4)
	logr12, logr412= get_halflight2(mloglum, mlograd, 4)
	# Slopes between r1/2 and 4*r1/2, per galaxy and for the mean profile.
	Ms, cs, errs= get_slopes1(logr12s, logr412s,lograd, loglumd, error=None, smax=stax)
	M, c, logrcut, logldcut, sterr, errcut =get_slopes1(logr12, logr412, mlograd, mlogdens, error=mlogerr, smax=stax)
	cutmlogld = M * logrcut + c  # best-fit line over the cut radius range
	ind=[loglum, loglumd, lograd, logr12s]
	means=[mloglum,mlogdens,mlograd,logr12, mlogerr]
	ind_slope=[Ms, cs, errs]
	mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
	#logrcut and logldcut are for lines of best fit
	return ind, means, ind_slope, mean_slopes
def my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2):
    """Make all comparison plots for the two subsamples.

    Argument layout (as returned by my_halflight2):
      inds        = [loglum, loglumd, lograd, logr12s]
      means       = [mloglum, mlogdens, mlograd, logr12, mlogerr]
      ind_slope   = [Ms, cs, errs]
      mean_slopes = [M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
    NOTE(review): uses module globals hh, ty, outdir, doutdir and, inside
    all_lumprof, a module-level 'plt' -- confirm those exist at call time.
    """
    per=[str(hh*100), '%']
    per=''.join(per)
    tag1=['Number of Galaxies= '+str(len(inds1[0])), 'Galaxies w/ Mass Fractions >'+per,'Mass Fractions > '+per]
    tag2=['Number of Galaxies= '+str(len(inds2[0])), 'Galaxies w/ Mass Fractions <'+per,'Mass Fractions < '+per]
    #inds=[lum1, lumd1, rad1, hrad1]
    #means=[mlum1,mdens1,mrad1,mhrad1, merr1]
    #ind_slope=[m1s, c1s, err1s]
    #mean_slopes=[m1, c1, radcut1, dencut1, ynew1,sterr1, errcut1]
    def lum_mult_fit(x1, x2, y1, y2, xcut1, xcut2, yfit1, yfit2, sterr1, sterr2 , m1, m2, error1, error2, outdir=''):
        # Scatter of the mean density profiles with the fitted mean slopes.
        print('Make Scatter Plots')
        import matplotlib.pyplot as plt
        import numpy as np
        import math
        f=plt.figure()
        plt.scatter(x1, y1, color='r', marker='o',label=tag1[1]+' ('+str(len(inds1[0]))+')')
        plt.plot(xcut1, yfit1, color='m', label='(>'+str(per)+') mean slope= '+str(round(m1,2))+' +- '+str(round(sterr1,2)))
        plt.errorbar(x1, y1, yerr=error1, fmt='.',color='r')
        plt.scatter(x2, y2, color='b', marker='o',label=tag2[1]+' ('+str(len(inds2[0]))+')')
        plt.plot(xcut2, yfit2, color='c', label='(<'+str(per)+') mean slope= ' +str(round(m2,2))+' +- '+str(round(sterr2,2)))
        plt.errorbar(x2, y2, yerr=error2, fmt='.',color='b')
        plt.xlabel('Log Radii (kpc)')
        plt.ylabel('Luminosity Densities (Lsolar/kpc^2)')
        plt.title('Average Luminosity Densities v Radii')
        plt.legend(loc=0,prop={'size':6.0})
        #f.text(0.05, 0.05, txtslope, color='red', weight='bold')
        outdirs=outdir+'lumage.pdf'
        #plt.show()
        f.savefig(outdirs)
        print(outdirs)
    def dist_mean(m1s, m2s, m1, m2, sterr1, sterr2, KS=False):
        # Histogram of the per-galaxy slopes plus the two mean slopes.
        import matplotlib.pyplot as plt
        import numpy as np
        import math
        figs=plt.figure()
        bs=np.linspace(-2.0,-1.4,num=15, endpoint=False)
        # NOTE(review): 'normed' was removed in matplotlib 3.x -- use
        # density=True on modern matplotlib.
        n1, b1, p1= plt.hist(m1s, bs, color='red', label=tag1[1]+ ' ('+str(len(m1s))+')', alpha=0.65, zorder=2, normed=1)
        n2, b2, p2= plt.hist(m2s,bs, color='blue', label=tag2[1]+ ' ('+str(len(m2s))+')', alpha=0.65,zorder=2,normed=1)
        ts=''
        if KS==True:
            M=m1s+m2s
            # NOTE(review): scipy.stats may require an explicit
            # 'import scipy.stats'; a bare 'import scipy' is not guaranteed
            # to expose the stats subpackage.
            import scipy
            D, p=scipy.stats.ks_2samp(m1s,m2s)
            plt.plot(0,0, c='green', marker='*', label='K-S test is '+str(np.round(D,3)))
            plt.xlim(np.min(M),-1.3)
            ts='KS'
        #print('Standard Deviation ('+tag1[2]+'): ', str(round(np.std(m1s),2)))
        #print('Standard Deviation ('+tag2[2]+'): ', str(round(np.std(m2s),2)))
        plt.axvline(x=m1, color='magenta',label='(>'+str(per)+') mean slope= '+str(round(m1,2))+' +- '+str(round(sterr1,2)), zorder=3)
        plt.axvline(x=m2, color='cyan', label='(<'+str(per)+') mean slope= '+str(round(m2,2))+' +- '+str(round(sterr2,2)), zorder=3)
        plt.xlabel('Slopes', fontsize=10)
        plt.legend(loc=0,prop={'size':6.5})
        plt.ylabel('Frequency', fontsize=10)
        plt.title('With '+ty+' Slopes')
        outdirs=doutdir+ts+'slope_agedist.pdf'
        #figs.text(0.03, 0.03, txtdist, color='red', weight='bold')
        #plt.show()
        figs.savefig(outdirs)
        print(outdirs)
    def slopevLmax(m1, m2, L1, L2):
        # Scatter of max luminosity vs slope (currently not called below).
        import matplotlib.pyplot as plt
        N1=len(m1)
        N2=len(m2)
        Lmax1=[np.max(L1[n]) for n in range(N1)]
        Lmax2=[np.max(L2[n]) for n in range(N2)]
        #gives us Lmax
        fs=plt.figure()
        plt.scatter(m1, Lmax1, color='red', label='Not Flagged Galaxies')
        plt.scatter(m2, Lmax2, color='blue', label='Flagged Galaxies')
        plt.xlabel('Slopes')
        plt.ylabel('Max Luminosities (Lsolar)')
        plt.title('Max Luminosities v Slopes')
        plt.legend(loc=0,prop={'size':7.0})
        plt.show()
    def all_lumprof(lum1s, lum2s, rad1s, rad2s, x1, x2, y1, y2, error1, error2):
        # Overlay every individual profile (grey) with the two mean profiles.
        f=plt.figure()
        print(x1)
        print(x2)
        print(y1)
        print(y2)
        for n in range(len(lum1s)):
            plt.plot(rad1s[n], lum1s[n],color='lightgrey', marker='.')
        for n in range(len(lum2s)):
            plt.plot(rad2s[n], lum2s[n],color='lightgrey', marker='.')
        plt.scatter(x1, y1, color='red', marker='o',label='#'+tag1[1]+': '+ str(len(inds1[0])), zorder=3)
        plt.scatter(x2,y2,color='blue', marker='o',label='#'+tag2[1]+': '+str(len(inds2[0])), zorder=3)
        plt.xlabel('Log Radii (kpc)')
        plt.ylabel('Luminosity Densities (Lsolar/kpc^2)')
        plt.title('Average Luminosity Densities v Radii')
        plt.legend(loc=0,prop={'size':6.0})
        #plt.show()
        outdirs=outdir+'allage_lumprof.pdf'
        #plt.show()
        f.savefig(outdirs)
        print(outdirs)
    all_lumprof(inds1[1], inds2[1], inds1[2], inds2[2], means1[2], means2[2], means1[1], means2[1],means1[4], means2[4])
    #slopevLmax(ind_slope1[0],ind_slope2[0], inds1[1], inds2[1])
    dist_mean(ind_slope1[0],ind_slope2[0],mean_slopes1[0],mean_slopes2[0],mean_slopes1[5], mean_slopes2[5], KS=False)
    lum_mult_fit(means1[2], means2[2], means1[1], means2[1], mean_slopes1[2], mean_slopes2[2], mean_slopes1[4], mean_slopes2[4], mean_slopes1[5], mean_slopes2[5], mean_slopes1[0], mean_slopes2[0],means1[4], means2[4], outdir=outdir)
# Run the pipeline on the two age-selected subsamples and make the plots.
inds1, means1, ind_slope1, mean_slopes1=my_halflight2(data1)
inds2, means2, ind_slope2, mean_slopes2=my_halflight2(data2)
my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2)
# Optional bright-object-flag diagnostic; set flagtest to any truthy
# value (e.g. 'yes') to enable.
flagtest=''
# Fixed: the original read "if flagtest':" -- a stray quote that made the
# whole module a SyntaxError. A plain truthiness test preserves the
# intended off-by-default behavior.
if flagtest:
    Flag1=['flags_pixel_bright_object_center', 'brobj_cen_flag-', 'No Bright Ojbect Centers', 'Only Bright Object Centers', 'brobj_cen_flag']
    Flag2=['flags_pixel_bright_object_any', 'brobj_all_flag-', 'No Bright Ojbects', 'Only Bright Objects', 'brobj_all_flag']
    bandi='i'
    _, flag1,lab= TFflag(bandi,Flag1, data1)
    _,flag2, lab= TFflag(bandi,Flag1, data2)
    _, flag3,lab= TFflag(bandi,Flag2, data1)
    _,flag4, lab= TFflag(bandi,Flag2, data2)
    print('Total Objects in older= ', len(data1))
    print('Bright Object Centers in older= ', len(flag1))
    print('Bright Objects in older= ', len(flag3))
    print('Total Objects in younger= ', len(data2))
    print('Bright Object Centers in younger= ', len(flag2))
    print('Bright Objects in younger= ', len(flag4))
#not in use currently
def my_halflight(dat1):
    """Older single-half-light-radius variant of my_halflight2 (unused)."""
    loglum, lograd, loglumd= get_ind_lums(dat1, bands, aperture, scale='log')
    if stax==True:
        loglum, lograd, loglumd= upper_rad_cut(loglum, lograd, loglumd, 4, proof=False)
    #print('length of radius array is ', len(lograd))
    mloglum, mlogdens, mlograd, mlogerr= get_avg_lums(loglum, lograd, loglumd, gr=[1,80,11],type=ty, scale='lindata')
    logr12s= get_halflight(loglum, lograd)
    logr12= get_halflight(mloglum, mlograd)
    Ms, cs, errs= get_slopes(logr12s, lograd, loglumd, error=None, smax=stax)
    M, c, logrcut, logldcut, sterr, errcut =get_slopes(logr12, mlograd, mlogdens, error=mlogerr, smax=stax)
    cutmlogld = M * logrcut + c
    ind=[loglum, loglumd, lograd, logr12s]
    means=[mloglum,mlogdens,mlograd,logr12, mlogerr]
    ind_slope=[Ms, cs, errs]
    mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
    #logrcut and logldcut are for lines of best fit
    # NOTE(review): the trailing '| mit |' below is dataset-extraction
    # residue (a CSV row delimiter), preserved verbatim; it is not part of
    # the original source file.
    return ind, means, ind_slope, mean_slopes | mit |
rvraghav93/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 110 | 5681 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)

# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause

import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data -- load the Boston housing matrix once and slice it twice
# (the original called load_boston() twice for no reason).
boston_data = load_boston()['data']
X1 = boston_data[:, [8, 10]]  # two clusters
X2 = boston_data[:, [5, 12]]  # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.261),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.261),
    "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}

# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(1)
    clf.fit(X1)
    # decision_function == 0 is the learned frontier; draw it as a contour.
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])

legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
             xycoords="data", textcoords="data",
             xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")

legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())

plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
campagnola/acq4 | acq4/analysis/tools/Fitting.py | 3 | 36863 | #!/usr/bin/env python
from __future__ import print_function
"""
Python class wrapper for data fitting.
Includes the following external methods:
getFunctions returns the list of function names (dictionary keys)
FitRegion performs the fitting
Note that FitRegion will plot on top of the current data using MPlots routines
if the current curve and the current plot instance are passed.
"""
# January, 2009
# Paul B. Manis, Ph.D.
# UNC Chapel Hill
# Department of Otolaryngology/Head and Neck Surgery
# Supported by NIH Grants DC000425-22 and DC004551-07 to PBM.
# Copyright Paul Manis, 2009
#
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Additional Terms:
The author(s) would appreciate that any modifications to this program, or
corrections of errors, be reported to the principal author, Paul Manis, at
pmanis@med.unc.edu, with the subject line "Fitting Modifications".
"""
import sys
import numpy
import scipy
import scipy.optimize
try:
import openopt
HAVE_OPENOPT = True
except ImportError:
HAVE_OPENOPT = False
print("There was an error importing openopt. Continuing....")
import ctypes
import numpy.random
#from numba import autojit
# Optional plotting backend; fitting itself does not require it.
usingMPlot = False
if usingMPlot:
    import MPlot # we include plotting as part of the fitting
def debug_trace():
    '''Set a tracepoint in the Python debugger that works with Qt.

    PyQt4 installs an input hook that interferes with pdb; remove it
    before dropping into the debugger when PyQt4 is importable.
    '''
    # Fixed: the original tested an undefined module-level name 'pyqt'
    # (NameError at call time); test importability of PyQt4 directly.
    try:
        from PyQt4.QtCore import pyqtRemoveInputHook
    except ImportError:
        pyqtRemoveInputHook = None
    from pdb import set_trace
    if pyqtRemoveInputHook is not None:
        pyqtRemoveInputHook()
    set_trace()
class Fitting():
# dictionary contains:
# name of function: function call, initial parameters, iterations, plot color, then x and y for testing
# target values, names of parameters, constant values, and derivative function if needed.
#
def __init__(self):
    """Build the registry of available fit functions.

    Each entry of fitfuncmap maps a name to a 9-tuple:
    (callable, initial params, max iterations/evaluations, plot color,
     [x0, x1, dx] test range, target params, parameter names,
     fixed constants or None, analytic derivative or None).
    """
    self.fitfuncmap = {
        'exp0'  : (self.exp0eval, [0.0, 20.0], 2000, 'k', [0, 100, 1.],
                   [1.0, 5.0], ['A0', 'tau'], None, None),
        'exp1'  : (self.expeval, [-60, 3.0, 15.0], 10000, 'k', [0, 100, 1.],
                   [0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, None), #self.expevalprime),
        'exptau'  : (self.exptaueval, [-60, 3.0, 15.0], 10000, 'k', [0, 100, 1.],
                   [0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, None), #self.expevalprime),
        'expsum'  : (self.expsumeval,  [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k',  [0, 1000, 1.],
                   [0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
        'expsum2'  : (self.expsumeval2,  [0., -0.5, -0.250], 50000, 'k',  [0, 1000, 1.],
                   [0., -0.5, -0.25], ['A0', 'A1'], [5., 20.], None),
        'exp2'  : (self.exp2eval,  [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k',  [0, 1000, 1.],
                   [0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
        'exppow'  : (self.exppoweval,  [0.0, 1.0, 100, ], 2000, 'k',  [0, 100, 0.1],
                   [0.0, 1.0, 100.0], ['DC', 'A0', 'tau'], None, None),
        'exppulse'  : (self.expPulse,  [3.0, 2.5, 0.2, 2.5, 2.0, 0.5], 2000, 'k',  [0, 10, 0.3],
                   [0.0, 0., 0.75, 4., 1.5, 1.], ['DC', 't0', 'tau1', 'tau2', 'amp', 'width'], None, None),
        'boltz' : (self.boltzeval,  [0.0, 1.0, -50.0, -5.0], 5000, 'r', [-130., -30., 1.],
                   [0.00, 0.010, -100.0, 7.0],  ['DC', 'A0', 'x0', 'k'], None, None),
        'gauss' : (self.gausseval,  [1.0, 0.0, 0.5], 2000, 'y',  [-10., 10., 0.2],
                   [1.0, 1.0, 2.0], ['A', 'mu', 'sigma'], None, None),
        'line'  : (self.lineeval,  [1.0, 0.0], 500, 'r', [-10., 10., 0.5],
                   [0.0, 2.0], ['m', 'b'], None, None),
        'poly2' : (self.poly2eval, [1.0, 1.0, 0.0], 500, 'r', [0, 100, 1.],
                   [0.5, 1.0, 5.0], ['a', 'b', 'c'], None, None),
        'poly3' : (self.poly3eval, [1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
                   [0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd'], None, None),
        'poly4' : (self.poly4eval, [1.0, 1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
                   [0.1, 0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd', 'e'], None, None),
        'sin'   : (self.sineeval, [-1., 1.0, 4.0, 0.0], 1000, 'r', [0., 100., 0.2],
                   [0.0, 1.0, 9.0, 0.0], ['DC', 'A', 'f', 'phi'], None, None),
        'boltz2' : (self.boltzeval2, [0.0, 0.5, -50.0, 5.0, 0.5, -20.0, 3.0], 1200, 'r',
                   [-100., 50., 1.], [0.0, 0.3, -45.0, 4.0, 0.7, 10.0, 12.0],
                   ['DC', 'A1', 'x1', 'k1', 'A2', 'x2', 'k2'], None, None),
        'taucurve' : (self.taucurve, [50., 300.0, 60.0, 10.0, 8.0, 65.0, 10.0], 50000, 'r',
                   [-150., 50., 1.], [0.0, 237.0, 60.0, 12.0, 17.0, 60.0, 14.0],
                   ['DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'], None, self.taucurveder),
    }
    # Sum-of-squares error of the most recent fit (see getFitErr).
    self.fitSum2Err = 0
def getFunctions(self):
return(list(self.fitfuncmap.keys()))
def exp0eval(self, p, x, y=None, C = None, sumsq = False):
    """Zero-offset exponential: yd = p[0] * exp(-x / p[1]).

    Returns the model when y is None, the summed squared error when
    sumsq is True, and the plain residual y - yd otherwise.
    """
    yd = p[0] * numpy.exp(-x / p[1])
    if y is None:
        return yd
    residual = y - yd
    if sumsq is True:
        return numpy.sum(residual ** 2.0)
    return residual
def expsumeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Sum of two exponentials with independent time constants and amplitudes,
    and a DC offset:
        yd = p[0] + p[1]*exp(-x/p[2]) + p[3]*exp(-x/p[4])
    Returns the model if y is None; otherwise the (optionally weighted)
    residual, or its sum of squares when sumsq is True.
    """
    yd = p[0] + (p[1]* numpy.exp(-x/p[2])) + (p[3]*numpy.exp(-x/p[4]))
    if y is None:
        return yd
    else:
        yerr = y - yd
        if weights is not None:
            yerr = yerr * weights
        if sumsq is True:
            return numpy.sum(yerr**2.0)
        else:
            return yerr
def expsumeval2(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Sum of two exponentials with predefined time constants C[0], C[1];
    only the amplitudes and DC offset vary:
        yd = p[0] + p[1]*exp(-x/C[0]) + p[2]*exp(-x/C[1])
    (Note: the 'weights' parameter is accepted but not used here.)
    """
    yd = p[0] + (p[1]* numpy.exp(-x/C[0])) + (p[2]*numpy.exp(-x/C[1]))
    if y is None:
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def exptaueval(self, p, x, y=None, C = None, sumsq = True, weights=None):
    """
    Exponential with offset, decaying from the starting value:
        yd = (p[0]+p[1]) - p[1]*exp(-x/p[2])
    NOTE(review): unlike the sibling eval functions, sumsq defaults to
    True here -- confirm this asymmetry is intentional before changing.
    """
    yd = (p[0]+p[1]) - p[1] * numpy.exp(-x/p[2])
    # print yd.shape
    # print y.shape
    if y is None:
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def expeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """Offset exponential: yd = p[0] + p[1] * exp(-x / p[2])."""
    yd = p[0] + p[1] * numpy.exp(-x / p[2])
    if y is None:
        return yd
    diff = y - yd
    return numpy.sum(diff ** 2.0) if sumsq is True else diff
def expevalprime(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Offset exponential plus a derivative term; returns (yd, ydp) when
    y is None, otherwise the residual / summed squared error.
    NOTE(review): ydp = p[1]*exp(-x/p[2])/p[2]**2 lacks the factor of x
    expected for d(yd)/d(tau) -- verify before using as an analytic
    Jacobian (it is not currently referenced from fitfuncmap).
    """
    ydp = p[1] * numpy.exp(-x/p[2])/(p[2]*p[2])
    yd = p[0] + p[1] * numpy.exp(-x/p[2])
    if y is None:
        return (yd, ydp)
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def exppoweval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Single exponential rising to a power:
        yd = p[0] + p[1]*(1 - exp(-x/p[2]))**C[0]
    The exponent defaults to 1.0 when C is None.
    """
    if C is None:
        cx = 1.0
    else:
        cx = C[0]
    yd = p[0] + p[1] * (1.0-numpy.exp(-x/p[2]))**cx
    if y is None:
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2)
        else:
            return y - yd
def exp2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Double-exponential activation-current fit:
        yd = p[0] + p[1]*(1-exp(-x/p[2]))**2 + p[3]*(1-exp(-x/p[4]))
    Returns the model if y is None; the root-sum-square error when
    sumsq is True; otherwise the residual y - yd.
    """
    yd = p[0] + (p[1] * (1.0 - numpy.exp(-x/p[2]))**2.0 ) + (p[3] * (1.0 - numpy.exp(-x/p[4])))
    # Fixed: 'y == None' broadcasts elementwise for numpy arrays and then
    # raises ValueError in a boolean context; identity test is required.
    if y is None:
        return yd
    else:
        if sumsq is True:
            ss = numpy.sqrt(numpy.sum((y - yd)**2.0))
            # if p[4] < 3.0*p[2]:
            #     ss = ss*1e6  # penalize them being too close
            return ss
        else:
            return y - yd
# @autojit
def expPulse(self, p, x, y=None, C=None, sumsq = False, weights = None):
    """Exponential pulse function (rising exponential with optional
    variable-length plateau followed by falling exponential).
    Parameter p is [yOffset, t0, tau1, tau2, amp, width]; x must be a
    numpy array. Returns the model if y is None; the root-sum-square
    error when sumsq is True; otherwise the residual.
    """
    yOffset, t0, tau1, tau2, amp, width = p
    yd = numpy.empty(x.shape)
    yd[x<t0] = yOffset
    m1 = (x>=t0)&(x<(t0+width))
    m2 = (x>=(t0+width))
    x1 = x[m1]
    x2 = x[m2]
    yd[m1] = amp*(1-numpy.exp(-(x1-t0)/tau1))+yOffset
    amp2 = amp*(1-numpy.exp(-width/tau1)) ## y-value at start of decay
    yd[m2] = ((amp2)*numpy.exp(-(x2-(width+t0))/tau2))+yOffset
    # Fixed: 'y == None' is ambiguous for numpy arrays; use identity test.
    if y is None:
        return yd
    else:
        if sumsq is True:
            ss = numpy.sqrt(numpy.sum((y-yd)**2.0))
            return ss
        else:
            return y-yd
def boltzeval(self,p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Boltzmann sigmoid: yd = p[0] + (p[1]-p[0]) / (1 + exp((x-p[2])/p[3])).
    Returns the model if y is None; the root-sum-square error when
    sumsq is True; otherwise the residual.
    """
    yd = p[0] + (p[1]-p[0])/(1.0 + numpy.exp((x-p[2])/p[3]))
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sqrt(numpy.sum((y - yd)**2.0))
        else:
            return y - yd
def boltzeval2(self,p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Sum of two Boltzmann sigmoids:
        yd = p[0] + p[1]/(1+exp((x-p[2])/p[3])) + p[4]/(1+exp((x-p[5])/p[6]))
    """
    yd = p[0] + p[1]/(1 + numpy.exp((x-p[2])/p[3])) + p[4]/(1 + numpy.exp((x-p[5])/p[6]))
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def gausseval(self,p, x, y=None, C = None, sumsq = False, weights=None):
    """
    Normalized Gaussian with amplitude p[0], mean p[1], sigma p[2]:
        yd = (p[0]/(p[2]*sqrt(2*pi))) * exp(-(x-p[1])**2 / (2*p[2]**2))
    """
    yd = (p[0]/(p[2]*numpy.sqrt(2.0*numpy.pi)))*numpy.exp(-((x - p[1])**2.0)/(2.0*(p[2]**2.0)))
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def lineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """Straight line: yd = p[0]*x + p[1] (slope m, intercept b)."""
    yd = p[0]*x + p[1]
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def poly2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """Quadratic polynomial: yd = p[0]*x**2 + p[1]*x + p[2]."""
    yd = p[0]*x**2.0 + p[1]*x + p[2]
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def poly3eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """Cubic polynomial: yd = p[0]*x**3 + p[1]*x**2 + p[2]*x + p[3]."""
    yd = p[0]*x**3.0 + p[1]*x**2.0 + p[2]*x +p[3]
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def poly4eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """Quartic polynomial: yd = p[0]*x**4 + ... + p[3]*x + p[4]."""
    yd = p[0]*x**4.0 + p[1]*x**3.0 + p[2]*x**2.0 + p[3]*x +p[4]
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def sineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
    """Sine with DC offset: yd = p[0] + p[1]*sin(x*2*pi/p[2] + p[3])."""
    yd = p[0] + p[1]*numpy.sin((x*2.0*numpy.pi/p[2])+p[3])
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sum((y - yd)**2.0)
        else:
            return y - yd
def taucurve(self, p, x, y=None, C = None, sumsq=True, weights=None):
    """
    HH-like description of activation/inactivation time constant:
        yd = p[0] + 1/(p[1]*exp((x+p[2])/p[3]) + p[4]*exp(-(x+p[5])/p[6]))
    Parameter order: 'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'.
    (Note: sumsq defaults to True for this function.)
    """
    yd = p[0] + 1.0/(p[1]*numpy.exp((x+p[2])/p[3]) +p[4]*numpy.exp(-(x+p[5])/p[6]))
    if y is None:  # fixed: was 'y == None', invalid for numpy arrays
        return yd
    else:
        if sumsq is True:
            return numpy.sqrt(numpy.sum((y - yd)**2.0))
        else:
            return y - yd
def taucurveder(self, p, x):
    """Analytic derivative of taucurve with respect to x.

    Parameter order: 'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'.
    """
    rise = p[1] * numpy.exp((p[2] + x) / p[3])
    fall = p[4] * numpy.exp(-(p[5] + x) / p[6])
    return -(rise / p[3] - fall / p[6]) / (rise + fall) ** 2.0
def getClipData(self, x, y, t0, t1):
    """Clip (x, y) to the window [t0, t1] by nearest-sample indices.

    x must be monotonic (increasing or decreasing); t0 and t1 may be
    given in either order. Returns the matching slices of x and y.
    """
    i0 = numpy.argmin(numpy.abs(x - t0))
    i1 = numpy.argmin(numpy.abs(x - t1))
    if i0 > i1:
        i0, i1 = i1, i0
    return (x[i0:i1], y[i0:i1])
def FitRegion(self, whichdata, thisaxis, tdat, ydat, t0 = None, t1 = None,
              fitFunc = 'exp1', fitFuncDer = None, fitPars = None, fixedPars = None,
              fitPlot = None, plotInstance = None, dataType= 'xy', method = None,
              bounds=None, weights=None, constraints=()):
    """Fit the selected region of the data with the named fit function.

    **Arguments**
    ============= ===================================================
    whichdata      indices of the records (traces) to fit
    thisaxis       axis of ydat holding the trace data ('blocks' mode)
    tdat           time base array
    ydat           data array (1-D, 2-D, or blocked 3-D)
    t0 (optional)  Minimum of time data - determined from tdat if left unspecified
    t1 (optional)  Maximum of time data - determined from tdat if left unspecified
    fitFunc (optional) Name of the function to fit (key of fitfuncmap). Default is 'exp1'.
    fitFuncDer (optional) derivative function for openopt; default=None
    fitPars (optional) Initial fit parameters. Use the values defined in self.fitfuncmap if unspecified.
    fixedPars (optional) Fixed parameters to pass to the function. Default=None
    fitPlot (optional) plot target for MPlot display; default=None
    plotInstance (optional) default=None
    dataType (optional) Options are ['xy', 'blocks', '2d']. Default='xy'
    method (optional) Options are ['curve_fit', 'fmin', 'simplex', 'Nelder-Mead', 'bfgs',
                   'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B', 'openopt']. Default='leastsq'
    bounds (optional) default=None
    weights (optional) default=None
    constraints (optional) default=()
    ============= ===================================================

    To call with tdat and ydat as simple arrays:
    FitRegion(1, 0, tdat, ydat, FitFunc = 'exp1')
    e.g., the first argument should be 1, but this axis is ignored if datatype is 'xy'

    Returns (xp, xf, yf, yn): the fitted parameter sets, the x and y
    arrays of each evaluated fit, and the parameter-name lists.
    """
    self.fitSum2Err = 0.0
    if t0 == t1:
        if plotInstance is not None and usingMPlot:
            (x, y) = plotInstance.getCoordinates()
            t0 = x[0]
            t1 = x[1]
    if t1 is None:
        t1 = numpy.max(tdat)
    if t0 is None:
        t0 = numpy.min(tdat)
    func = self.fitfuncmap[fitFunc]
    if func is None:
        print("FitRegion: unknown function %s" % (fitFunc))
        return
    # Accumulators: parameters, fit x/y arrays, and parameter names.
    xp = []
    xf = []
    yf = []
    yn = []
    tx = []
    names = func[6]
    if fitPars is None:
        fpars = func[1]
    else:
        fpars = fitPars
    if method == 'simplex':  # remap calls if needed for newer versions of scipy (>= 0.11)
        method = 'Nelder-Mead'
    if ydat.ndim == 1 or dataType == 'xy' or dataType == '2d':  # check if 1-d, then "pretend" its only a 1-element block
        nblock = 1
    else:
        nblock = ydat.shape[0]  # otherwise, this is the number of traces in the block
    for block in range(nblock):
        for record in whichdata:
            if dataType == 'blocks':
                (tx, dy) = self.getClipData(tdat[block], ydat[block][record, thisaxis, :], t0, t1)
            elif ydat.ndim == 1:
                (tx, dy) = self.getClipData(tdat, ydat, t0, t1)
            else:
                (tx, dy) = self.getClipData(tdat, ydat[record,:], t0, t1)
            tx = numpy.array(tx)-t0
            dy = numpy.array(dy)
            yn.append(names)
            if not any(tx):
                continue  # no data in the window...
            ier = 0
            #
            # Different optimization methods are included here. Not all have been tested fully with
            # this wrapper.
            #
            if method is None or method == 'leastsq':  # use standard leastsq, no bounds
                plsq, cov, infodict, mesg, ier = scipy.optimize.leastsq(func[0], fpars,
                                                    args=(tx, dy, fixedPars),
                                                    full_output = 1, maxfev = func[2])
                if ier > 4:
                    print("optimize.leastsq error flag is: %d" % (ier))
                    print(mesg)
            elif method == 'curve_fit':
                plsq, cov = scipy.optimize.curve_fit(func[0], tx, dy, p0=fpars)
                ier = 0
            elif method in ['fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']:  # use standard wrapper from scipy for those routines
                if constraints is None:
                    constraints = ()
                res = scipy.optimize.minimize(func[0], fpars, args=(tx, dy, fixedPars, True),
                        method=method, jac=None, hess=None, hessp=None, bounds=bounds,
                        constraints=constraints, tol=None, callback=None,
                        options={'maxiter': func[2], 'disp': False })
                plsq = res.x
            elif method == 'openopt':  # use OpenOpt's routines - usually slower, but sometimes they converge better
                if not HAVE_OPENOPT:
                    raise Exception("Requested openopt fitting method but openopt is not installed.")
                if bounds is not None:
                    # unpack bounds
                    lb = [y[0] for y in bounds]
                    ub = [y[1] for y in bounds]
                    fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer, lb=lb, ub=ub)
                    r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
                    plsq = r.xf
                    ier = 0
                else:
                    fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer)
                    print(func[8])
                    fopt.checkdf()
                    r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
                    plsq = r.xf
                    ier = 0
            else:
                print('method %s not recognized, please check Fitting.py' % (method))
                return
            # Evaluate the fitted function on a dense grid for plotting and
            # record the summed squared error of the fit.
            xfit = numpy.arange(min(tx), max(tx), (max(tx)-min(tx))/100.0)
            yfit = func[0](plsq, xfit, C=fixedPars)
            yy = func[0](plsq, tx, C=fixedPars)  # calculate function
            self.fitSum2Err = numpy.sum((dy - yy)**2)
            # Fixed: this (MPlot-only) branch referenced undefined names
            # 'FitPlot' and 'fund' and passed keywords FitPlot() does not
            # accept; call self.FitPlot with its actual signature.
            if usingMPlot and fitPlot is not None and plotInstance is not None:
                self.FitPlot(xFit = xfit, yFit = yfit, fitFunc = fitFunc,
                             fitPars = plsq, fitPlot = fitPlot, plotInstance = plotInstance)
            xp.append(plsq)  # parameter list
            xf.append(xfit)  # x plot point list
            yf.append(yfit)  # y fit point list
    return (xp, xf, yf, yn)  # includes names with yn and range of tx
def FitPlot(self, xFit = None, yFit = None, fitFunc = 'exp1',
fitPars = None, fixedPars = None, fitPlot=None, plotInstance = None,
color=None):
""" Plot the fit data onto the fitPlot with the specified "plot Instance".
if there is no xFit, or some parameters are missing, we just return.
if there is xFit, but no yFit, then we try to compute the fit with
what we have. The plot is superimposed on the specified "fitPlot" and
the color is specified by the function color in the fitPars list.
"""
if xFit is None or fitPars is None:
return
func = self.fitfuncmap[fitFunc]
if color is None:
fcolor = func[3]
else:
fcolor = color
if yFit is None:
yFit = numpy.zeros((len(fitPars), xFit.shape[1]))
for k in range(0, len(fitPars)):
yFit[k] = func[0](fitPars[k], xFit[k], C=fixedPars)
if fitPlot is None:
return(yFit)
for k in range(0, len(fitPars)):
print(dir(plotInstance))
if plotInstance is None:
fitPlot.plot(xFit[k], yFit[k], pen=fcolor)
else:
plotInstance.PlotLine(fitPlot, xFit[k], yFit[k], color = fcolor)
return(yFit)
def getFitErr(self):
""" Return the fit error for the most recent fit
"""
return(self.fitSum2Err)
def expfit(self, x, y):
""" find best fit of a single exponential function to x and y
using the chebyshev polynomial approximation.
returns (DC, A, tau) for fit.
Perform a single exponential fit to data using Chebyshev polynomial method.
Equation fit: y = a1 * exp(-x/tau) + a0
Call: [a0 a1 tau] = expfit(x,y);
Calling parameter x is the time base, y is the data to be fit.
Returned values: a0 is the offset, a1 is the amplitude, tau is the time
constant (scaled in units of x).
Relies on routines chebftd to generate polynomial coeffs, and chebint to compute the
coefficients for the integral of the data. These are now included in this
.py file source.
This version is based on the one in the pClamp manual: HOWEVER, since
I use the bounded [-1 1] form for the Chebyshev polynomials, the coefficients are different,
and the resulting equation for tau is different. I manually optimized the tau
estimate based on fits to some simulated noisy data. (Its ok to use the whole range of d1 and d0
when the data is clean, but only the first few coeffs really hold the info when
the data is noisy.)
NOTE: The user is responsible for making sure that the passed data is appropriate,
e.g., no large noise or electronic transients, and that the time constants in the
data are adequately sampled.
To do a double exp fit with this method is possible, but more complex.
It would be computationally simpler to try breaking the data into two regions where
the fast and slow components are dominant, and fit each separately; then use that to
seed a non-linear fit (e.g., L-M) algorithm.
Final working version 4/13/99 Paul B. Manis
converted to Python 7/9/2009 Paul B. Manis. Seems functional.
"""
n = 30; # default number of polynomials coeffs to use in fit
a = numpy.amin(x)
b = numpy.amax(x)
d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...
d1 = self.chebint(a, b, d0, n) # coeffs of integral...
tau = -numpy.mean(d1[2:3]/d0[2:3])
try:
g = numpy.exp(-x/tau)
except:
g = 0.0
dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function
# now estimate the amplitude from the ratios of the coeffs.
a1 = self.estimate(d0, dg, 1)
a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here
return(a0, a1, tau)#
def estimate(self, c, d, m):
""" compute optimal estimate of parameter from arrays of data """
n = len(c)
a = sum(c[m:n]*d[m:n])/sum(d[m:n]**2.0)
return(a)
# note : the following routine is a bottleneck. It should be coded in C.
def chebftd(self, a, b, n, t, d):
""" Chebyshev fit; from Press et al, p 192.
matlab code P. Manis 21 Mar 1999
"Given a function func, lower and upper limits of the interval [a,b], and
a maximum degree, n, this routine computes the n coefficients c[1..n] such that
func(x) sum(k=1, n) of ck*Tk(y) - c0/2, where y = (x -0.5*(b+a))/(0.5*(b-a))
This routine is to be used with moderately large n (30-50) the array of c's is
subsequently truncated at the smaller value m such that cm and subsequent
terms are negligible."
This routine is modified so that we find close points in x (data array) - i.e., we find
the best Chebyshev terms to describe the data as if it is an arbitrary function.
t is the x data, d is the y data...
"""
bma = 0.5*(b-a)
bpa = 0.5*(b+a)
inc = t[1]-t[0]
f = numpy.zeros(n)
for k in range(0, n):
y = numpy.cos(numpy.pi*(k+0.5)/n)
pos = int(0.5+(y*bma+bpa)/inc)
if pos < 0:
pos = 0
if pos >= len(d)-2:
pos = len(d)-2
try:
f[k]= d[pos+1]
except:
print("error in chebftd: k = %d (len f = %d) pos = %d, len(d) = %d\n" % (k, len(f), pos, len(d)))
print("you should probably make sure this doesn't happen")
fac = 2.0/n
c=numpy.zeros(n)
for j in range(0, n):
sum=0.0
for k in range(0, n):
sum = sum + f[k]*numpy.cos(numpy.pi*j*(k+0.5)/n)
c[j]=fac*sum
return(c)
def chebint(self, a, b, c, n):
""" Given a, b, and c[1..n] as output from chebft or chebftd, and given n,
the desired degree of approximation (length of c to be used),
this routine computes cint, the Chebyshev coefficients of the
integral of the function whose coeffs are in c. The constant of
integration is set so that the integral vanishes at a.
Coded from Press et al, 3/21/99 P. Manis (Matlab)
Python translation 7/8/2009 P. Manis
"""
sum = 0.0
fac = 1.0
con = 0.25*(b-a) # factor that normalizes the interval
cint = numpy.zeros(n)
for j in range(1,n-2):
cint[j]=con*(c[j-1]-c[j+1])/j
sum = sum + fac * cint[j]
fac = - fac
cint[n-1] = con*c[n-2]/(n-1)
sum = sum + fac*cint[n-1]
cint[0] = 2.0*sum # set constant of integration.
return(cint)
# routine to flatten an array/list.
#
def flatten(self, l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
# flatten()
# run tests if we are "main"
if __name__ == "__main__":
    # Self-test / demonstration driver: fits each registered function to
    # synthetic noisy data and compares fitted, initial and true parameters.
    # Fixes: replaced identity tests ("is True", "is 'exp1'") with equality /
    # truthiness -- comparing strings with "is" relies on CPython interning
    # and raises SyntaxWarning on modern interpreters.
    # import matplotlib.pyplot as pyplot
    import timeit
    from . import Fitting
    import matplotlib as MP
    MP.use('Qt4Agg')
    ################## Do not modify the following code
    # sets up matplotlib with sans-serif plotting...
    import matplotlib.gridspec as GS
    # import mpl_toolkits.axes_grid1.inset_locator as INSETS
    # #import inset_axes, zoomed_inset_axes
    # import mpl_toolkits.axes_grid1.anchored_artists as ANCHOR
    # # import AnchoredSizeBar
    stdFont = 'Arial'
    import matplotlib.pyplot as pylab
    pylab.rcParams['text.usetex'] = True
    pylab.rcParams['interactive'] = False
    pylab.rcParams['font.family'] = 'sans-serif'
    pylab.rcParams['font.sans-serif'] = 'Arial'
    pylab.rcParams['mathtext.default'] = 'sf'
    pylab.rcParams['figure.facecolor'] = 'white'
    # next setting allows pdf font to be readable in Adobe Illustrator
    pylab.rcParams['pdf.fonttype'] = 42
    pylab.rcParams['text.dvipnghack'] = True
    ##################### to here (matplotlib stuff - touchy!
    Fits = Fitting.Fitting()
    # x = numpy.arange(0, 100.0, 0.1)
    # y = 5.0-2.5*numpy.exp(-x/5.0)+0.5*numpy.random.randn(len(x))
    # (dc, aFit,tauFit) = Fits.expfit(x,y)
    # yf = dc + aFit*numpy.exp(-x/tauFit)
    # pyplot.figure(1)
    # pyplot.plot(x,y,'k')
    # pyplot.hold(True)
    # pyplot.plot(x, yf, 'r')
    # pyplot.show()
    exploreError = False
    if exploreError:  # was "is True"
        # explore the error surface for a function:
        func = 'exp1'
        f = Fits.fitfuncmap[func]
        p1range = numpy.arange(0.1, 5.0, 0.1)
        p2range = numpy.arange(0.1, 5.0, 0.1)
        err = numpy.zeros((len(p1range), len(p2range)))
        x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
        C = None
        if func == 'expsum2':
            C = f[7]
            # check exchange of tau1 ([1]) and width[4]
            C = None
        yOffset, t0, tau1, tau2, amp, width = f[1]  # get inital parameters
        y0 = f[0](f[1], x, C=C)
        noise = numpy.random.random(y0.shape) - 0.5
        y0 += 0.0* noise
        sh = err.shape
        yp = numpy.zeros((sh[0], sh[1], len(y0)))
        for i, p1 in enumerate(p1range):
            tau1t = tau1*p1
            for j, p2 in enumerate(p2range):
                ampt = amp*p2
                pars = (yOffset, t0, tau1t, tau2, ampt, width)  # repackage
                err[i,j] = f[0](pars, x, y0, C=C, sumsq = True)
                yp[i,j] = f[0](pars, x, C=C, sumsq = False)
        pylab.figure()
        CS=pylab.contour(p1range*tau1, p2range*width, err, 25)
        CB = pylab.colorbar(CS, shrink=0.8, extend='both')
        pylab.figure()
        for i, p1 in enumerate(p1range):
            for j, p2 in enumerate(p2range):
                pylab.plot(x, yp[i,j])
        pylab.plot(x, y0, 'r-', linewidth=2.0)
    # run tests for each type of fit, return results to compare parameters
    cons = None
    bnds = None
    signal_to_noise = 100000.
    for func in Fits.fitfuncmap:
        if func != 'exp1':
            continue
        print("\nFunction: %s\nTarget: " % (func), end=" ")
        f = Fits.fitfuncmap[func]
        for k in range(0,len(f[1])):
            print("%f " % (f[1][k]), end=" ")
        print("\nStarting: ", end=" ")
        for k in range(0,len(f[5])):
            print("%f " % (f[5][k]), end=" ")
        # nstep = 500.0
        # if func == 'sin':
        #     nstep = 100.0
        x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
        C = None
        if func == 'expsum2':
            C = f[7]
        if func == 'exppulse':
            C = f[7]
        tv = f[5]
        y = f[0](f[1], x, C=C)
        yd = numpy.array(y)
        noise = numpy.random.normal(0, 0.1, yd.shape)
        my = numpy.amax(yd)
        #yd = yd + sigmax*0.05*my*(numpy.random.random_sample(shape(yd))-0.5)
        yd += noise*my/signal_to_noise
        testMethod = 'SLSQP'
        # NOTE: the taucurve and boltz branches below are deliberately
        # disabled with "continue"; the code after it is unreachable.
        if func == 'taucurve':
            continue
            bounds=[(0., 100.), (0., 1000.), (0.0, 500.0), (0.1, 50.0),
                (0., 1000), (0.0, 500.0), (0.1, 50.0)]
            (fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
        elif func == 'boltz':
            continue
            bounds = [(-0.5,0.5), (0.0, 20.0), (-120., 0.), (-20., 0.)]
            (fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
        elif func == 'exp2':
            bounds=[(-0.001, 0.001), (-5.0, 0.), (1.0, 500.0), (-5.0, 0.0),
                (1., 10000.)]
            (fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
        elif func == 'exppulse':
            # set some constraints to the fitting
            # yOffset, tau1, tau2, amp, width = f[1]  # order of constraings
            dt = numpy.mean(numpy.diff(x))
            bounds = [(-5, 5), (-15., 15.), (-2, 2.0), (2-10, 10.), (-5, 5.), (0., 5.)]
            # cxample for constraints:
            # cons = ({'type': 'ineq', 'fun': lambda x: x[4] - 3.0*x[2]},
            #         {'type': 'ineq', 'fun': lambda x: - x[4] + 12*x[2]},
            #         {'type': 'ineq', 'fun': lambda x: x[2]},
            #         {'type': 'ineq', 'fun': lambda x: - x[4] + 2000},
            #         )
            cons = ({'type': 'ineq', 'fun': lambda x: x[3] - x[2] },  # tau1 < tau2
                )
            C = None
            tv = f[5]
            initialgr = f[0](f[5], x, None )
            (fpar, xf, yf, names) = Fits.FitRegion(
                numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bounds, method=testMethod)
            # print xf
            # print yf
            # print fpar
            # print names
        else:
            initialgr = f[0](f[5], x, None )
            (fpar, xf, yf, names) = Fits.FitRegion(
                numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bnds, method=testMethod)
            #print fpar
        s = numpy.shape(fpar)
        j = 0
        outstr = ""
        initstr = ""
        truestr = ""
        for i in range(0, len(names[j])):
            # print "%f " % fpar[j][i],
            outstr = outstr + ('%s = %f, ' % (names[j][i], fpar[j][i]))
            initstr = initstr + '%s = %f, ' % (names[j][i], tv[i])
            truestr = truestr + '%s = %f, ' % (names[j][i], f[1][i])
        print( "\nTrue(%d) : %s" % (j, truestr) )
        print( "FIT(%d) : %s" % (j, outstr) )
        print( "init(%d) : %s" % (j, initstr) )
        print( "Error: : %f" % (Fits.fitSum2Err))
        if func == 'exp1':  # was "is 'exp1'": string identity comparison
            pylab.figure()
            pylab.plot(numpy.array(x), yd, 'ro-')
            pylab.hold(True)  # NOTE(review): pylab.hold was removed in matplotlib 3 -- confirm target version
            pylab.plot(numpy.array(x), initialgr, 'k--')
            pylab.plot(xf[0], yf[0], 'b-')  # fit
            pylab.show()
| mit |
cogmission/nupic.research | projects/sequence_prediction/continuous_sequence/nupic_output.py | 13 | 6732 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
import numpy as np
WINDOW = 100
class NuPICOutput(object):
    """Abstract base class for writing NuPIC model output.

    Subclasses implement write() and close().  Python 2 style: the abstract
    machinery is attached via the __metaclass__ attribute.
    """

    __metaclass__ = ABCMeta

    def __init__(self, names, showAnomalyScore=False):
        # names: one label per output stream (used for files / plot titles)
        self.names = names
        # showAnomalyScore is stored but not read in this module
        self.showAnomalyScore = showAnomalyScore

    @abstractmethod
    def write(self, timestamps, actualValues, predictedValues,
              predictionStep=1):
        """Record one row of (timestamp, actual, predicted) per output name."""
        pass

    @abstractmethod
    def close(self):
        """Flush and release any resources held by the output."""
        pass
class NuPICFileOutput(NuPICOutput):
    """Writes model output to one CSV file per name (OPF-style 3-row header)."""

    def __init__(self, *args, **kwargs):
        super(NuPICFileOutput, self).__init__(*args, **kwargs)
        self.outputFiles = []
        self.outputWriters = []
        self.lineCounts = []
        headerRow = ['timestamp', 'data', 'prediction-5step']
        for name in self.names:
            self.lineCounts.append(0)
            # assumes ./prediction/ already exists -- TODO confirm
            outputFileName = "./prediction/%s_TM_pred.csv" % name
            print "Preparing to output %s data to %s" % (name, outputFileName)
            outputFile = open(outputFileName, "w")
            self.outputFiles.append(outputFile)
            outputWriter = csv.writer(outputFile)
            self.outputWriters.append(outputWriter)
            # OPF-style header: field names, field types, special flags
            outputWriter.writerow(headerRow)
            outputWriter.writerow(['int', 'float', 'float'])
            outputWriter.writerow(['', '', ''])

    def write(self, timestamps, actualValues, predictedValues5step,
              extraValues = []):
        # NOTE(review): mutable default argument (extraValues=[]) is shared
        # across calls -- should use a None sentinel.
        # assert len(timestamps) == len(actualValues) == len(predictedValues5step)
        for index in range(len(self.names)):
            timestamp = timestamps[index]
            actual = actualValues[index]
            # prediction1step = predictedValues1step[index]
            prediction5step = predictedValues5step[index]
            writer = self.outputWriters[index]
            # rows with a None timestamp are silently skipped
            if timestamp is not None:
                row = [timestamp, actual, prediction5step]
                row.extend(extraValues)
                writer.writerow(row)
                self.lineCounts[index] += 1

    def close(self):
        """Close every output file and report the number of lines written."""
        for index, name in enumerate(self.names):
            self.outputFiles[index].close()
            print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
    """Live matplotlib plot of actual vs. predicted values, one subplot
    per name, over a sliding window of WINDOW points."""

    def __init__(self, maxBucket, *args, **kwargs):
        super(NuPICPlotOutput, self).__init__(*args, **kwargs)
        # Turn matplotlib interactive mode on.
        plt.ion()
        self.dates = []
        self.convertedDates = []
        self.actualValues = []
        self.predictedValues = []
        self.actualLines = []
        self.predictedLines = []
        # lines are created lazily on the first write(), once the first
        # timestamps are known
        self.linesInitialized = False
        self.graphs = []
        self.maxBucket = maxBucket
        # buffer for per-bucket likelihoods (currently unused; see the
        # commented-out code in write())
        self.likelihoodsVecAll = np.zeros((maxBucket, 10000))
        plotCount = len(self.names)
        plotHeight = max(plotCount * 3, 6)
        fig = plt.figure(figsize=(14, plotHeight))
        gs = gridspec.GridSpec(plotCount, 1)
        for index in range(len(self.names)):
            self.graphs.append(fig.add_subplot(gs[index, 0]))
            plt.title(self.names[index])
            plt.ylabel('Passenger Count')
            plt.xlabel('Date')
        # plt.tight_layout()

    def initializeLines(self, timestamps):
        """Create the per-name deques and line artists, seeded with the
        first timestamps and zero values."""
        for index in range(len(self.names)):
            print "initializing %s" % self.names[index]
            # graph = self.graphs[index]
            self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
            self.convertedDates.append(deque(
                [date2num(date) for date in self.dates[index]], maxlen=WINDOW
            ))
            self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
            self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
            actualPlot, = self.graphs[index].plot(
                self.dates[index], self.actualValues[index]
            )
            self.actualLines.append(actualPlot)
            predictedPlot, = self.graphs[index].plot(
                self.dates[index], self.predictedValues[index]
            )
            self.predictedLines.append(predictedPlot)
        self.linesInitialized = True

    def write(self, timestamps, actualValues, predictedValues,
              predictionStep, results):
        """Append one point per name and redraw the interactive plot."""
        assert len(timestamps) == len(actualValues) == len(predictedValues)
        # bucketLL = results.inferences['multiStepBucketLikelihoods'][5]
        # likelihoodsVec = np.zeros((self.maxBucket,))
        # if bucketLL is not None:
        #     for (k, v) in bucketLL.items():
        #         likelihoodsVec[k] = v
        #
        # i = len(self.actualValues) + 1
        # self.likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec
        # We need the first timestamp to initialize the lines at the right X value,
        # so do that check first.
        if not self.linesInitialized:
            self.initializeLines(timestamps)
        for index in range(len(self.names)):
            self.dates[index].append(timestamps[index])
            self.convertedDates[index].append(date2num(timestamps[index]))
            self.actualValues[index].append(actualValues[index])
            self.predictedValues[index].append(predictedValues[index])
            # Update data
            self.actualLines[index].set_xdata(self.convertedDates[index])
            self.actualLines[index].set_ydata(self.actualValues[index])
            self.predictedLines[index].set_xdata(self.convertedDates[index])
            self.predictedLines[index].set_ydata(self.predictedValues[index])
            self.graphs[index].relim()
            self.graphs[index].autoscale_view(True, True, True)
        plt.draw()
        plt.legend(('actual','predicted'), loc=3)

    def close(self):
        """Leave interactive mode and block on the final figure."""
        plt.ioff()
        plt.show()
# Explicit ABC registrations; redundant here because both classes already
# subclass NuPICOutput directly, but harmless and kept for clarity.
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| agpl-3.0 |
Fireblend/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
    """Time lars_path vs. orthogonal_mp (with and without a precomputed
    Gram matrix) over a grid of problem sizes.

    Parameters
    ----------
    samples_range, features_range : sequences of int
        Grid of n_samples / n_features values to benchmark.

    Returns
    -------
    dict mapping a label to a (len(features_range), len(samples_range))
    array of LARS/OMP timing ratios.
    """
    it = 0
    results = dict()

    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()

    max_it = len(samples_range) * len(features_range)
    for i_s, n_samples in enumerate(samples_range):
        for i_f, n_features in enumerate(features_range):
            it += 1
            # floor division: n_nonzero_coefs must be an integer (plain '/'
            # yields a float under Python 3 and would break the generators)
            n_informative = n_features // 10
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # dataset_kwargs = {
            #     'n_train_samples': n_samples,
            #     'n_test_samples': 2,
            #     'n_features': n_features,
            #     'n_informative': n_informative,
            #     'effective_rank': min(n_samples, n_features) / 10,
            #     #'effective_rank': None,
            #     'bias': 0.0,
            # }
            dataset_kwargs = {
                'n_samples': 1,
                'n_components': n_features,
                'n_features': n_samples,
                'n_nonzero_coefs': n_informative,
                'random_state': 0
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
            X = np.asfortranarray(X)

            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars_gram[i_f, i_s] = delta

            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, Gram=None, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars[i_f, i_s] = delta

            gc.collect()
            print("benchmarking orthogonal_mp (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=True,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp_gram[i_f, i_s] = delta

            gc.collect()
            print("benchmarking orthogonal_mp (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=False,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp[i_f, i_s] = delta

    results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
    results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
    return results
if __name__ == '__main__':
    # Build the size grid and run the benchmark (Python 2 era code:
    # results.iteritems() and map()-as-list below; np.int is also a
    # long-deprecated alias -- NOTE(review) if porting to Python 3).
    samples_range = np.linspace(1000, 5000, 5).astype(np.int)
    features_range = np.linspace(1000, 5000, 5).astype(np.int)
    results = compute_bench(samples_range, features_range)
    # max_time is computed but not used below
    max_time = max(np.max(t) for t in results.values())

    import pylab as pl
    fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
    for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i)
        # symmetric color range centered on a ratio of 1 (equal timing)
        vmax = max(1 - timings.min(), -1 + timings.max())
        pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([''] + map(str, samples_range))
        ax.set_yticklabels([''] + map(str, features_range))
        pl.xlabel('n_samples')
        pl.ylabel('n_features')
        pl.title(label)

    pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
    ax = pl.axes([0.1, 0.08, 0.8, 0.06])
    pl.colorbar(cax=ax, orientation='horizontal')
    pl.show()
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.

    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    train_sizes : array-like, optional
        Relative (or absolute) training-set sizes passed through to
        sklearn's learning_curve.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # shade +/- one standard deviation around each mean curve
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
# Demo: learning curves for a naive Bayes classifier and an RBF SVM on the
# digits dataset (uses the deprecated sklearn.cross_validation API).
digits = load_digits()
X, y = digits.data, digits.target

title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
                                   test_size=0.2, random_state=0)

estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)

title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
                                   test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)

plt.show()
| bsd-3-clause |
vortex-ape/scikit-learn | doc/conf.py | 5 | 10156 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
    'numpydoc',
    'sphinx.ext.linkcode', 'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.imgconverter',
    'sphinx_gallery.gen_gallery',
    'sphinx_issues',
]

# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False

# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get('NO_MATHJAX'):
    extensions.append('sphinx.ext.imgmath')
    imgmath_image_format = 'svg'
else:
    extensions.append('sphinx.ext.mathjax')
    mathjax_path = ('https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/'
                    'MathJax.js?config=TeX-AMS_SVG')

autodoc_default_flags = ['members', 'inherited-members']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# generate autosummary even if no references
autosummary_generate = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2018, scikit-learn developers (BSD License)')

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'templates', 'includes', 'themes']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
                      'google_analytics': True, 'surveybanner': False,
                      'sprintbanner': True}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_domain_indices = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'


# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    'preamble': r"""
        \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
        \usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
        """
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
                    u('scikit-learn developers'), 'manual'), ]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
latex_domain_indices = False

# strip doctest directive comments (e.g. # doctest: +SKIP) from rendered code
trim_doctests_flags = True

# intersphinx configuration: cross-project links to external API docs
intersphinx_mapping = {
    'python': ('https://docs.python.org/{.major}'.format(
        sys.version_info), None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('https://matplotlib.org/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
}

sphinx_gallery_conf = {
    'doc_module': 'sklearn',
    'backreferences_dir': os.path.join('modules', 'generated'),
    'reference_url': {
        'sklearn': None}
}


# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
                   'sphx_glr_plot_outlier_detection_003.png': 372,
                   'sphx_glr_plot_gpr_co2_001.png': 350,
                   'sphx_glr_plot_adaboost_twoclass_001.png': 372,
                   'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
    """Sphinx ``build-finished`` hook: produce the resized carousel images.

    Does nothing when the build finished with an exception. Each image
    listed in the module-level ``carousel_thumbs`` mapping that exists in
    the build's ``_images`` directory is scaled to its configured maximum
    width (height capped at 190) and written next to it with a
    ``_carousel`` suffix.
    """
    if exception is not None:
        return
    print('Preparing carousel images')
    img_dir = os.path.join(app.builder.outdir, '_images')
    for plot_name, max_width in carousel_thumbs.items():
        src_path = os.path.join(img_dir, plot_name)
        if not os.path.exists(src_path):
            continue
        thumb_path = os.path.join(img_dir, plot_name[:-4] + '_carousel.png')
        sphinx_gallery.gen_rst.scale_image(src_path, thumb_path, max_width, 190)
# Config for sphinx_issues: URL templates used to link issue/user references.
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
    """Sphinx extension entry point: register JS assets and build hooks."""
    # JS helpers: toggle the >>> prompt in code examples, plus misc extras.
    for script in ('js/copybutton.js', 'js/extra.js'):
        app.add_javascript(script)
    # Resize the homepage carousel images once the build has finished.
    app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
# (maps a documented object to its line in the repo at the built revision).
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
drericstrong/pyedna | examples/examples.py | 1 | 1220 | # -*- coding: utf-8 -*-
import pandas as pd
import pyedna.ezdna as dna

# --- Single-tag history pulls ------------------------------------------------
# Snap data for TESTPNT1, sampled every 30 seconds over the query window.
tag = "TESTSITE.TESTSERVICE.TESTPNT1"  # format site.service.tag
start = "12/01/16 01:01:01"            # format mm/dd/yy hh:mm:ss
end = "01/03/17 01:01:01"              # format mm/dd/yy hh:mm:ss
period = "00:00:30"                    # format hh:mm:ss
df = dna.GetHist(tag, start, end, period=period, mode="snap")

# The same tag pulled as raw (unprocessed) samples. Other pull types
# (Average, Interpolated, Max, Min) are described in the eDNA documentation.
df2 = dna.GetHist(tag, start, end)

# --- Multi-tag pull (raw mode) -----------------------------------------------
tags = ["TESTSITE.TESTSERVICE.TESTPNT1", "TESTSITE.TESTSERVICE.TESTPNT2",
        "TESTSITE.TESTSERVICE.TESTPNT3", "TESTSITE.TESTSERVICE.TESTPNT4"]
df3 = dna.GetMultipleTags(tags, start, end)

# --- Service and point discovery ---------------------------------------------
# Connected services, and the point metadata for one service.
services = dna.GetServices()
points = dna.GetPoints("TESTSITE.TESTSERVICE")
juhuntenburg/pipelines | src/mindwandering/resting_state_volume_analysis_group_level.py | 2 | 27788 | import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
import os
import pandas as pd
from CPAC.group_analysis.group_analysis import create_group_analysis
dropbox_root = "/scr/adenauer1/PowerFolder/Dropbox"
regressors_file = dropbox_root + "/papers/neural_correlates_of_mind_wandering/regressors.csv"
from variables import workingdir, resultsdir, subjects
# Maps derivative name -> DataGrabber template with one %s slot for the
# subject id. Commented entries are alternative analyses kept for reference.
derivatives = {
"reho": "reho_z/_subject_id_%s/*.nii.gz",
# # "alff": "alff_z/_subject_id_%s/*.nii.gz",
"falff": "falff_z/_subject_id_%s/*.nii.gz",
# # "left_pcc": "seed_based_z/_roi_-8.-56.26/_subject_id_%s/*.nii.gz",
# # "right_pcc": "seed_based_z/_roi_8.-56.26/_subject_id_%s/*.nii.gz",
# # "left_mpfc": "seed_based_z/_roi_-6.52.-2/_subject_id_%s/*.nii.gz",
# # "right_mpfc": "seed_based_z/_roi_6.52.-2/_subject_id_%s/*.nii.gz",
"centrality": "degree_centrality/_subject_id_%s/_z_score0/*.nii.gz",
# "falff_neg_past_c96": "post_hoc_seed_based_z/_seed_name_falff_neg_past_c96/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_words_c70": "post_hoc_seed_based_z/_seed_name_falff_neg_words_c70/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_pos_negative_c81": "post_hoc_seed_based_z/_seed_name_falff_pos_negative_c81/_subject_id_%s/corr_map_calc.nii.gz",
# "reho_pos_friends_c93": "post_hoc_seed_based_z/_seed_name_reho_pos_friends_c93/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_positive_c90": "post_hoc_seed_based_z/_seed_name_falff_neg_positive_c90/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_words_c71": "post_hoc_seed_based_z/_seed_name_falff_neg_words_c71/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_pos_positive_c101": "post_hoc_seed_based_z/_seed_name_falff_pos_positive_c101/_subject_id_%s/corr_map_calc.nii.gz",
# "reho_pos_specific_vague_c82": "post_hoc_seed_based_z/_seed_name_reho_pos_specific_vague_c82/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_specific_vague_c71": "post_hoc_seed_based_z/_seed_name_falff_neg_specific_vague_c71/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_pos_friends_c83": "post_hoc_seed_based_z/_seed_name_falff_pos_friends_c83/_subject_id_%s/corr_map_calc.nii.gz",
# "reho_neg_future_c73": "post_hoc_seed_based_z/_seed_name_reho_neg_future_c73/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_words_c70_and_c71": "post_hoc_seed_based_z/_seed_name_falff_neg_words_c70/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_neg_past_c97": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_past_c97/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_neg_words_c75": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_words_c75/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_pos_negative_c81": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_negative_c81/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_reho_pos_friends_c92": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_pos_friends_c92/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_falff_neg_positive_c88': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_positive_c88/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_pos_friends_c84": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_friends_c84/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_pos_positive_c98": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_positive_c98/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_reho_pos_specific_vague_c78": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_pos_specific_vague_c78/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_falff_neg_words_c74_and_c75': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_words_c74_and_c75/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_falff_pos_images_c88': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_images_c88/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_reho_neg_future_c75': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_neg_future_c75/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_falff_past_higher_than_future_c76': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_past_higher_than_future_c76/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_falff_neg_friends_c82': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_friends_c82/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_falff_neg_specific_vague_c77': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_specific_vague_c77/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_centrality_neg_past_c26': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_centrality_neg_past_c26_2mm/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_reho_neg_negative_c87': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_neg_negative_c87/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_reho_positive_higher_than_negative_c75': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_positive_higher_than_negative_c75/_subject_id_%s/corr_map_calc.nii.gz",
}
# for i, RSNid in enumerate([5, 15, 9, 6, 8, 1, 2, 7, 12, 11]):
# derivatives["RSN%d"%(i+1)] = "dual_regression_z/_subject_id_%s" + "/temp_reg_map_z_%04d.nii.gz"%RSNid
if __name__ == '__main__':
# Group-level analysis workflow; nipype crash dumps go under the working dir.
wf = pe.Workflow(name="group_analysis")
wf.base_dir = workingdir
wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"
# Grabs one functional mask file per subject from the results directory.
mask_datasource = pe.Node(nio.DataGrabber(infields=['subject_ids'], outfields = ['mask_files']), name="mask_datasource")
mask_datasource.inputs.base_directory = resultsdir
mask_datasource.inputs.template = 'functional_mask/_subject_id_%s/*.nii'
mask_datasource.inputs.template_args['mask_files'] = [['subject_ids']]
mask_datasource.inputs.sort_filelist = True
mask_datasource.inputs.subject_ids = subjects
def calculate_group_mask(list_of_subject_masks):
    """Intersect per-subject masks into a strict group mask.

    A voxel survives only if it is non-NaN and positive in *every* subject
    mask. The result is written to ``group_mask.nii.gz`` in the current
    working directory and its absolute path is returned (the nipype node's
    working directory at run time).
    """
    # Imports are function-local because nipype serializes this function
    # into a util.Function node, which must be self-contained.
    import nibabel as nb
    import numpy as np
    import os
    first_nii = nb.load(list_of_subject_masks[0])
    # BUG FIX: get_shape()/get_data()/get_affine()/get_header() were removed
    # from nibabel; use the modern attribute/dataobj API instead.
    sum_mask = np.zeros(first_nii.shape)
    for mask in list_of_subject_masks:
        mask_data = np.asanyarray(nb.load(mask).dataobj)
        # Count this subject wherever the mask is valid (non-NaN) and positive.
        sum_mask[np.logical_and(np.logical_not(np.isnan(mask_data)), mask_data > 0)] += 1
    sum_mask /= len(list_of_subject_masks)
    # Keep only voxels present in all subjects (coverage fraction == 1).
    sum_mask[sum_mask != 1] = 0
    new_img = nb.Nifti1Image(sum_mask, first_nii.affine, first_nii.header)
    filename = "group_mask.nii.gz"
    nb.save(new_img, filename)
    return os.path.abspath(filename)
# Wrap the pure-python function above in a nipype node and feed it the masks.
calculate_group_mask_node = pe.Node(util.Function(input_names=["list_of_subject_masks"],
output_names=["group_mask"],
function=calculate_group_mask),
name="calculate_group_mask")
wf.connect(mask_datasource, "mask_files", calculate_group_mask_node, "list_of_subject_masks")
# Restrict the group mask to grey matter (hard-coded MNI grey-matter prior).
restrict_to_grey = pe.Node(fsl.maths.ApplyMask(), name="restrict_to_grey")
restrict_to_grey.inputs.mask_file = "/scr/adenauer1/MNI152_T1_graymatter100.nii.gz"
wf.connect(calculate_group_mask_node, "group_mask", restrict_to_grey, "in_file")
# merge_masks = pe.Node(fsl.Merge(dimension="t"), name="merge_masks")
# wf.connect(mask_datasource, "mask_files", merge_masks, "in_files")
#
# smooth_masks = pe.Node(fsl.maths.IsotropicSmooth(fwhm=fwhm), name="smooth_masks")
# wf.connect(merge_masks, "merged_file", smooth_masks, "in_file")
#
# mask_smooth_masks = pe.Node(fsl.maths.ApplyMask(), name="mask_smooth_masks")
# wf.connect(smooth_masks, "out_file", mask_smooth_masks, "in_file")
# wf.connect(merge_masks, "merged_file", mask_smooth_masks, "mask_file")
# def create_design(regressors_file, regressors, confounds=[], subject_ids=None):
#
# regressors_df = pd.read_csv(regressors_file).sort(columns="queried_ursi")
# if subject_ids:
# regressors_df = regressors_df[regressors_df.queried_ursi.isin(subject_ids)]
# regressors_df = regressors_df.filter(regressors + confounds)
# for row in regressors_df.iterrows():
# print "\t".join([str(i) for i in row[1]])
#
# for i,regressor in enumerate(regressors):
# print "/ContrastName%d\t%s"(i,regressor)
#
# print """/NumWaves %d
# /NumContrasts %d
# """%(len(regressors + confounds), len(regressors))
#
# for i,regressor in enumerate(regressors):
# print ["0"]*len(confounds)
models = ["past", "future", "positive", "negative", "friends", "specific_vague", "words", "images", "firstSum", "secondSum"]
#models = ["firstSum"]
model_nodes = {}
confounds = ["age","sex"]
# regressors_df = pd.read_csv(regressors_file).sort(columns="queried_ursi")
# NOTE(review): DataFrame.sort(columns=...) is the pre-0.20 pandas API
# (modern pandas uses sort_values(by=...)) -- confirm the pinned pandas version.
regressors_df = pd.read_csv("/scr/adenauer1/PowerFolder/Dropbox/papers/neural_correlates_of_mind_wandering/regressors_new_MeanFD.csv").sort(columns="queried_ursi")
subjects_int = [int(s) for s in subjects]
regressors_df = regressors_df[regressors_df.queried_ursi.isin(subjects_int)]
# Dummy-code sex as 0/1 columns and build sex-by-regressor interaction terms.
regressors_df["male"] = (regressors_df["sex"] == "male")*1
regressors_df["female"] = (regressors_df["sex"] == "female")*1
for reg in ["past", "future", "positive", "negative", "friends", "specific_vague", "words", "images", "firstSum", "secondSum", "age"]:
regressors_df["male_"+reg] = (regressors_df["sex"] == "male")*regressors_df[reg]
regressors_df["female_"+reg] = (regressors_df["sex"] == "female")*regressors_df[reg]
""" First part """
models = {
"all": {"variables": ["past", "future", "positive", "negative", "friends", "specific_vague", "words", "images","age", "male", "female"],
"contrasts": [
("pos_past", 'T', ["past"], [1]),
("neg_past", 'T', ["past"], [-1]),
("pos_future", 'T', ["future"], [1]),
("neg_future", 'T', ["future"], [-1]),
("past_higher_than_future", 'T', ["past", "future"], [1, -1]),
("future_higher_than_past", 'T', ["future", "past"], [1, -1]),
("pos_positive", 'T', ["positive"], [1]),
("neg_positive", 'T', ["positive"], [-1]),
("pos_negative", 'T', ["negative"], [1]),
("neg_negative", 'T', ["negative"], [-1]),
("positive_higher_than_negative", 'T', ["positive", "negative"], [1, -1]),
("negative_higher_than_positive", 'T', ["negative", "positive"], [1, -1]),
("pos_friends", 'T', ["friends"], [1]),
("neg_friends", 'T', ["friends"], [-1]),
("pos_specific_vague", 'T', ["specific_vague"], [1]),
("neg_specific_vague", 'T', ["specific_vague"], [-1]),
("pos_words", 'T', ["words"], [1]),
("neg_words", 'T', ["words"], [-1]),
("pos_images", 'T', ["images"], [1]),
("neg_images", 'T', ["images"], [-1]),
# ("pos_age", 'T', ["age"], [1]),
# ("neg_age", 'T', ["age"], [-1]),
# ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
# ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
]},
"all_with_MeanFD": {"variables": ["past", "future", "positive", "negative", "friends", "specific_vague", "words", "images","age", "male", "female", "newMeanFD"],
"contrasts": [
("pos_past", 'T', ["past"], [1]),
("neg_past", 'T', ["past"], [-1]),
("pos_future", 'T', ["future"], [1]),
("neg_future", 'T', ["future"], [-1]),
("past_higher_than_future", 'T', ["past", "future"], [1, -1]),
("future_higher_than_past", 'T', ["future", "past"], [1, -1]),
("pos_positive", 'T', ["positive"], [1]),
("neg_positive", 'T', ["positive"], [-1]),
("pos_negative", 'T', ["negative"], [1]),
("neg_negative", 'T', ["negative"], [-1]),
("positive_higher_than_negative", 'T', ["positive", "negative"], [1, -1]),
("negative_higher_than_positive", 'T', ["negative", "positive"], [1, -1]),
("pos_friends", 'T', ["friends"], [1]),
("neg_friends", 'T', ["friends"], [-1]),
("pos_specific_vague", 'T', ["specific_vague"], [1]),
("neg_specific_vague", 'T', ["specific_vague"], [-1]),
("pos_words", 'T', ["words"], [1]),
("neg_words", 'T', ["words"], [-1]),
("pos_images", 'T', ["images"], [1]),
("neg_images", 'T', ["images"], [-1]),
# ("pos_age", 'T', ["age"], [1]),
# ("neg_age", 'T', ["age"], [-1]),
# ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
# ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
]},
"first_with_MeanFD": {"variables": ["past", "future", "positive", "negative", "friends", "age", "male", "female", "newMeanFD"],
"contrasts": [
("pos_past", 'T', ["past"], [1]),
("neg_past", 'T', ["past"], [-1]),
("pos_future", 'T', ["future"], [1]),
("neg_future", 'T', ["future"], [-1]),
("past_higher_than_future", 'T', ["past", "future"], [1, -1]),
("future_higher_than_past", 'T', ["future", "past"], [1, -1]),
("pos_positive", 'T', ["positive"], [1]),
("neg_positive", 'T', ["positive"], [-1]),
("pos_negative", 'T', ["negative"], [1]),
("neg_negative", 'T', ["negative"], [-1]),
("positive_higher_than_negative", 'T', ["positive", "negative"], [1, -1]),
("negative_higher_than_positive", 'T', ["negative", "positive"], [1, -1]),
("pos_friends", 'T', ["friends"], [1]),
("neg_friends", 'T', ["friends"], [-1]),
]},
"second_with_MeanFD": {"variables": ["specific_vague", "words", "images","age", "male", "female", "newMeanFD"],
"contrasts": [
("pos_specific_vague", 'T', ["specific_vague"], [1]),
("neg_specific_vague", 'T', ["specific_vague"], [-1]),
("pos_words", 'T', ["words"], [1]),
("neg_words", 'T', ["words"], [-1]),
("pos_images", 'T', ["images"], [1]),
("neg_images", 'T', ["images"], [-1]),
# ("pos_age", 'T', ["age"], [1]),
# ("neg_age", 'T', ["age"], [-1]),
# ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
# ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
]},
# "age_sex": {"variables": ["age", "male", "female"],
# "contrasts": [("pos_age", 'T', ["age"], [1]),
# ("neg_age", 'T', ["age"], [-1]),
# ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
# ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
# ]},
# "first_sum": {"variables": ["firstSum", "age", "male", "female"],
# "contrasts": [("pos_firstSum", 'T', ["firstSum"], [1]),
# ("neg_firstSum", 'T', ["firstSum"], [-1]),
# ]}
}
# Build one FSL MultipleRegressDesign node per model specification.
# NOTE(review): dict.iteritems() exists only on Python 2; this script appears
# to target Python 2 (items() would work on both) -- confirm before porting.
for name, model in models.iteritems():
model_node = pe.Node(fsl.MultipleRegressDesign(), name="%s_model"%name)
regressors = {}
for reg in model["variables"]:
regressors[reg] = list(regressors_df[reg])
model_node.inputs.regressors = regressors
model_node.inputs.contrasts = model["contrasts"]
model_nodes[name] = model_node
# first_part_model_node = pe.Node(fsl.MultipleRegressDesign(), name="first_part_model")
# regressors = {}
# for reg in confounds + ["past", "future", "positive", "negative", "friends"]:
# regressors[reg] = list(regressors_df[reg])
# past = ("past", 'T', ["past"], [1])
# future = ("future", 'T', ["future"], [1])
# past_vs_future = ("past_vs_future", 'T', ["past", "future"], [1, -1])
# future_vs_past = ("future_vs_past", 'T', ["future", "past"], [1, -1])
# positive = ("positive", 'T', ["positive"], [1])
# negative = ("negative", 'T', ["negative"], [1])
# positive_vs_negative = ("positive_vs_negative", 'T', ["positive", "negative"], [1, -1])
# negative_vs_positive = ("negative_vs_positive", 'T', ["negative", "positive"], [1, -1])
# friends = ("friends", 'T', ["friends"], [1])
# contrasts = [past, future, positive, negative, friends, past_vs_future, future_vs_past, positive_vs_negative, negative_vs_positive,
# ("first_part", 'F', [past, future, positive, negative, friends])]
# first_part_model_node.inputs.regressors = regressors
# first_part_model_node.inputs.contrasts = contrasts
# model_nodes["first_part"] = first_part_model_node
#
# second_part_model_node = pe.Node(fsl.MultipleRegressDesign(), name="second_part_model")
# regressors = {}
# for reg in confounds + [ "specific_vague", "words", "images"]:
# regressors[reg] = list(regressors_df[reg])
# specific_vague = ("specific_vague", 'T', ["specific_vague"], [1])
# words = ("words", 'T', ["words"], [1])
# images = ("images", 'T', ["images"], [1])
# contrasts = [specific_vague, words, images,
# ("second_part", 'F', [specific_vague, words, images])]
# second_part_model_node.inputs.regressors = regressors
# second_part_model_node.inputs.contrasts = contrasts
# model_nodes["second_part"] = second_part_model_node
#
# age_model_node = pe.Node(fsl.MultipleRegressDesign(), name="age_model_node")
# regressors = {}
# regressors["age"] = list(regressors_df["age"])
# regressors["sex"] = list(regressors_df["sex"])
# contrasts = [("pos_age", 'T', ["age"], [1]),
# ("neg_age", 'T', ["age"], [-1]),base + "_z_map.nii.gz"
# ("pos_sex", 'T', ["sex"], [1]),
# ("neg_sex", 'T', ["sex"], [-1])]
# age_model_node.inputs.regressors = regressors
# age_model_node.inputs.contrasts = contrasts
# model_nodes["age"] = age_model_node
# For each derivative map: grab per-subject files, merge along time, then
# compute the across-subjects mean and standard-deviation images.
for derivative, template in derivatives.iteritems():
derivative_datasource = pe.Node(nio.DataGrabber(infields=['subject_ids'], outfields = ['derivative_files']), name="%s_datasource"%derivative)
derivative_datasource.inputs.base_directory = resultsdir
derivative_datasource.inputs.template = template
derivative_datasource.inputs.sort_filelist = True
derivative_datasource.inputs.subject_ids = subjects
merge = pe.Node(fsl.Merge(dimension="t"), name="%s_merge"%derivative)
wf.connect(derivative_datasource, "derivative_files", merge, "in_files")
avg = pe.Node(fsl.maths.MeanImage(dimension="T"), name="%s_avg"%derivative)
avg.inputs.out_file = "%s_avg.nii.gz"%derivative
wf.connect(merge, "merged_file", avg, "in_file")
stddev = pe.Node(fsl.ImageMaths(op_string="-Tstd"), name="%s_stddev"%derivative)
stddev.inputs.out_file = "%s_stddev.nii.gz"%derivative
wf.connect(merge, "merged_file", stddev, "in_file")
def one_sample_test(avg_file, stddev_file, n):
    """Voxel-wise one-sample t-test converted to a z map.

    t = mean / (sd / sqrt(n)); the t values are mapped to z scores through
    the t CDF with ``n - 1`` degrees of freedom. Where the z value is
    infinite (CDF saturates) the raw t value is kept instead. The z map is
    saved next to the working directory as ``<avg basename>_z_map.nii.gz``
    and its absolute path returned.
    """
    # Function-local imports: this function is serialized into a nipype
    # util.Function node and must be self-contained.
    import nibabel as nb
    import numpy as np
    import scipy.stats as stats
    import os
    from nipype.utils.filemanip import split_filename
    avg_nii = nb.load(avg_file)
    # BUG FIX: get_data()/get_affine()/get_header() were removed from
    # nibabel; use the modern attribute/dataobj API instead.
    avg_data = np.asanyarray(avg_nii.dataobj)
    stddev_data = np.asanyarray(nb.load(stddev_file).dataobj)
    # NOTE(review): voxels with zero stddev produce inf/NaN here -- the inf
    # case is patched below, NaNs pass through. Confirm masks exclude them.
    t_map = (avg_data/(stddev_data/np.sqrt(n)))
    z_map = stats.norm.ppf(stats.t.cdf(t_map, n-1))
    z_map[np.isinf(z_map)] = t_map[np.isinf(z_map)]
    out_nii = nb.Nifti1Image(z_map, avg_nii.affine, avg_nii.header)
    _, base, _ = split_filename(avg_file)
    out_name = base + "_z_map.nii.gz"
    nb.save(out_nii, out_name)
    return os.path.abspath(out_name)
one_sample_t_test = pe.Node(util.Function(input_names= ["avg_file", "stddev_file", "n"],
output_names = ["z_map"],
function = one_sample_test),
name="%s_one_sample_t_test"%derivative)
# NOTE(review): n is set to len(subjects) - 1 although one_sample_test already
# subtracts 1 for the degrees of freedom -- verify the intended dof.
one_sample_t_test.inputs.n = len(subjects) - 1
wf.connect(avg, "out_file", one_sample_t_test, "avg_file")
wf.connect(stddev, "out_file", one_sample_t_test, "stddev_file")
# for model in model_nodes.keys():
# # estimate = pe.Node(fsl.Randomise(), name="%s_%s_estimate"%(model,derivative))
# # estimate.inputs.tfce = True
# # estimate.inputs.raw_stats_imgs = True
# # estimate.inputs.vox_p_values = True
# # estimate.inputs.demean = True
# # estimate.inputs.base_name = "%s_%s"%(model,derivative)
# # wf.connect(merge, "merged_file", estimate, "in_file")
# # if derivative != "centrality":
# # wf.connect(restrict_to_grey, "out_file", estimate, "mask")
# # wf.connect(model_nodes[model], "design_mat", estimate, "design_mat")
# # wf.connect(model_nodes[model], "design_con", estimate, "tcon")
# # wf.connect(model_nodes[model], "design_fts", estimate, "fcon")
#
# estimate_parametric = create_group_analysis(wf_name="%s_%s_estimate_parametric"%(model,derivative))
# estimate_parametric.inputs.inputspec.z_threshold = 2.3
# estimate_parametric.inputs.inputspec.p_threshold = 1 #0.05/2.0/4.0
# estimate_parametric.inputs.inputspec.parameters = ("/scr/adenauer1/templates/", "MNI152")
# merge_mask = estimate_parametric.get_node("merge_mask")
# cluster = estimate_parametric.get_node("easy_thresh_z").get_node("cluster")
# cluster.inputs.use_mm = True
# estimate_parametric.remove_nodes([merge_mask])
# estimate_parametric.remove_nodes([estimate_parametric.get_node("easy_thresh_z").get_node("overlay"),
# estimate_parametric.get_node("easy_thresh_z").get_node("slicer"),
# estimate_parametric.get_node("easy_thresh_z").get_node("create_tuple"),
# estimate_parametric.get_node("easy_thresh_z").get_node("image_stats"),
# estimate_parametric.get_node("easy_thresh_z").get_node("get_backgroundimage2")])
# if derivative != "centrality":
# wf.connect(restrict_to_grey, "out_file", estimate_parametric, "fsl_flameo.mask_file")
# wf.connect(restrict_to_grey, "out_file", estimate_parametric, "easy_thresh_z.inputspec.merge_mask")
# else:
# estimate_parametric.inputs.fsl_flameo.mask_file = "/scr/kalifornien1/mindwandering/workingdir/calculating_measures/downsample_mask/group_mask_masked_flirt.nii.gz"
# estimate_parametric.inputs.easy_thresh_z.inputspec.merge_mask = "/scr/kalifornien1/mindwandering/workingdir/calculating_measures/downsample_mask/group_mask_masked_flirt.nii.gz"
#
#
# estimate_parametric.inputs.easy_thresh_z.cluster.out_threshold_file = [ "derivative_" + derivative + "_model_" + model + "_contrast_" + c[0] + ".nii.gz" for c in models[model]["contrasts"]]
# wf.connect(derivative_datasource, "derivative_files", estimate_parametric, "inputspec.zmap_files")
# wf.connect(model_nodes[model], "design_mat", estimate_parametric, "inputspec.mat_file")
# wf.connect(model_nodes[model], "design_con", estimate_parametric, "inputspec.con_file")
# wf.connect(model_nodes[model], "design_fts", estimate_parametric, "inputspec.fts_file")
# wf.connect(model_nodes[model], "design_grp", estimate_parametric, "inputspec.grp_file")
#wf.write_graph(graph2use='exec')
# Execute serially (no parallel plugin).
wf.run(plugin="Linear")
# for file in glob("/scr/kalifornien1/mindwandering/workingdir/group_analysis/*estimate/*tfce_corrp_tstat*.nii.gz"):
# max = nb.load(file).get_data().max()
# if max > 0.95:
# print file.split("/")[-1] + " max p:" + str(max)
| mit |
oesteban/mriqc | mriqc/classifier/sklearn/_split.py | 1 | 4547 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# @Author: oesteban
# @Date: 2017-06-14 12:47:30
import numpy as np
from sklearn.utils import indexable
from sklearn.model_selection import (LeavePGroupsOut, StratifiedKFold)
from sklearn.model_selection._split import _RepeatedSplits
from ... import logging
LOG = logging.getLogger('mriqc.classifier')
class RobustLeavePGroupsOut(LeavePGroupsOut):
    """
    A LeavePGroupsOut split ensuring all folds have positive and
    negative samples.

    Folds whose test set contains a single class are dropped. The
    surviving splits are cached, so repeated calls to :meth:`split`
    return the same list.
    """

    def __init__(self, n_groups, groups=None):
        self._splits = None    # cache of the filtered splits
        self._groups = groups  # optional pre-computed group labels
        super(RobustLeavePGroupsOut, self).__init__(n_groups)

    def split(self, X, y=None, groups=None):
        # Serve the cached result when the splits were already computed.
        if self._splits:
            return self._splits

        if groups is None:
            groups = self._groups
        if groups is None:
            from ..data import get_groups
            groups, _ = get_groups(X)
        self._groups = groups

        self._splits = list(super(RobustLeavePGroupsOut, self).split(
            X, y=y, groups=groups))

        # Fold indices whose test portion holds only one class.
        degenerate = [fold for fold, (_, test_idx) in enumerate(self._splits)
                      if np.unique(np.array(y)[test_idx]).size == 1]
        if degenerate:
            self._splits = [spl for fold, spl in enumerate(self._splits)
                            if fold not in degenerate]
            LOG.warning('Some splits (%d) were dropped because one or more classes'
                        ' are totally missing', len(degenerate))
        return self._splits

    @property
    def groups(self):
        return self._groups

    def get_n_splits(self, X, y, groups):
        return len(self.split(X, y, groups))
class BalancedKFold(StratifiedKFold):
    """
    A stratified K-Fold split whose *test* folds are class-balanced.

    Each test fold is randomly down-sampled so every class contributes
    exactly as many samples as the rarest class in that fold. Train
    folds are passed through unchanged.
    """

    def split(self, X, y, groups=None):
        splits = super(BalancedKFold, self).split(X, y, groups)
        y = np.array(y)
        for train_index, test_index in splits:
            split_y = y[test_index]
            classes_y, y_inversed = np.unique(split_y, return_inverse=True)
            # Size of the smallest class within this test fold.
            min_y = min(np.bincount(y_inversed))
            new_index = np.zeros(min_y * len(classes_y), dtype=int)
            # BUG FIX: the original indexed the output slice with the class
            # *label* value (``cls * min_y``), which only works when labels
            # happen to be 0..n_classes-1; use the positional index instead.
            for pos, cls in enumerate(classes_y):
                cls_index = test_index[split_y == cls]
                if len(cls_index) > min_y:
                    cls_index = np.random.choice(
                        cls_index, size=min_y, replace=False)
                new_index[pos * min_y:(pos + 1) * min_y] = cls_index
            yield train_index, new_index
class RepeatedBalancedKFold(_RepeatedSplits):
    """
    A repeated K-Fold split, where test folds are balanced.

    Repeats :class:`BalancedKFold` ``n_repeats`` times with different
    randomization in each repetition.
    """

    def __init__(self, n_splits=5, n_repeats=10, random_state=None):
        super(RepeatedBalancedKFold, self).__init__(
            BalancedKFold, n_repeats, random_state, n_splits=n_splits)
class PartiallyHeldOutKFold(StratifiedKFold):
    """
    A K-Fold split on the test set where the train splits are
    augmented with the original train set (in whole).

    ``groups`` acts as a boolean mask: truthy entries mark the held-out
    (test) portion of the data; falsy entries always stay in training.
    """

    def __init__(self, n_splits=3, shuffle=False, random_state=None, groups=None):
        self._splits = None
        self._groups = groups  # default mask used when split() gets none
        super(PartiallyHeldOutKFold, self).__init__(
            n_splits=n_splits, shuffle=shuffle, random_state=random_state)

    def split(self, X, y, groups=None):
        if groups is None:
            groups = self._groups
        X, y, groups = indexable(X, y, groups)
        msk = np.array(groups, dtype=bool)
        train_idx = np.arange(len(X))[~msk]
        test_idx = np.arange(len(X))[msk]
        # BUG FIX: ``DataFrame.as_matrix()`` was removed in pandas 1.0 and the
        # old AttributeError fallback (``X[test_idx, :]``) does not work for
        # DataFrames either; use positional indexing when available.
        try:
            test_x = X.iloc[test_idx]
        except AttributeError:
            test_x = np.asarray(X)[test_idx]
        test_y = np.array(y)[test_idx]
        split = super(PartiallyHeldOutKFold, self).split(
            test_x, test_y)
        # NOTE(review): assumes the held-out samples are contiguous and located
        # after the training samples, so a single offset re-aligns the
        # fold-local indices -- TODO confirm against callers.
        offset = test_idx[0]
        for test_train, test_test in split:
            # Always train on the full non-held-out set plus this fold's train part.
            test_train = np.concatenate((train_idx, test_train + offset))
            yield test_train, test_test
class RepeatedPartiallyHeldOutKFold(_RepeatedSplits):
    """
    A repeated RepeatedPartiallyHeldOutKFold split.

    ``groups`` is forwarded to each :class:`PartiallyHeldOutKFold`
    repetition.
    """

    def __init__(self, n_splits=5, n_repeats=10, random_state=None, groups=None):
        super(RepeatedPartiallyHeldOutKFold, self).__init__(
            PartiallyHeldOutKFold, n_repeats, random_state, n_splits=n_splits,
            groups=groups)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.