repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
togawa28/mousestyles | mousestyles/classification/clustering.py | 3 | 7621 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from scipy.cluster.hierarchy import linkage
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn import metrics
import numpy as np
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
from mousestyles.data.utils import day_to_mouse_average
# prep data functions
def prep_data(mouse_data, melted=False, std=True, rescale=True):
    """
    Returns a ndarray data to be used in clustering algorithms:
    column 0 : strain,
    column 1: mouse,
    other columns corresponding to feature avg/std of a mouse over 16 days
    that may or may not be rescaled to the same unit as specified

    Parameters
    ----------
    mouse_data:
        (i) a 21131 * (4 + ) pandas DataFrame,
        column 0 : strain,
        column 1: mouse,
        column 2: day,
        column 3: hour,
        other columns corresponding to features
        or
        (ii) a 1921 * (3 + ) pandas DataFrame,
        column 0: strain,
        column 1: mouse,
        column 2: day,
        other columns corresponding to features
    melted: bool,
        False if the input mouse_data is of type (i)
    std: bool,
        whether the standard deviation of each feature is returned
    rescale: bool,
        whether each column is rescaled or not (rescale is performed by the
        column's maximum)

    Returns
    -------
    The ndarray as specified
    """
    # Feature columns start after the label columns: melted data carries
    # (strain, mouse, day); unmelted data additionally carries an hour column.
    if melted:
        mouse_X = np.array(mouse_data.iloc[:, 3:], dtype=float)
    else:
        mouse_X = np.array(mouse_data.iloc[:, 4:], dtype=float)
    mouse_labels = np.array(mouse_data.iloc[:, 0:3])
    mouse_dayavg, mouse_daystd = day_to_mouse_average(
        mouse_X, mouse_labels, num_strains=16, stdev=True, stderr=False)
    # Per-mouse feature averages with the std columns appended; the
    # (strain, mouse) labels stay in columns 0-1.
    mouse_dayavgstd = np.hstack([mouse_dayavg, mouse_daystd[:, 2:]])
    mouse_dayavgstd_X = mouse_dayavgstd[:, 2:]
    # Rescale each feature column by its maximum so features share a unit.
    mouse_dayavgstd_X_scl = mouse_dayavgstd_X / np.max(
        mouse_dayavgstd_X, axis=0)
    mouse_dayavgstd_scl = np.hstack(
        [mouse_dayavgstd[:, 0:2], mouse_dayavgstd_X_scl])
    if std and rescale:
        # BUG FIX: previously this combination returned the *unscaled*
        # avg+std array while (std=True, rescale=False) returned the
        # rescaled one — the two branches were swapped relative to the
        # documented meaning of ``rescale``.
        return mouse_dayavgstd_scl
    elif std:
        return mouse_dayavgstd
    elif rescale:
        return mouse_dayavgstd_scl[:, 0:(mouse_dayavg.shape[1])]
    else:
        return mouse_dayavg
# model fitting functions
def get_optimal_hc_params(mouse_day):
"""
Returns a list of 2: [method, dist]
method: {'ward', 'average', 'complete'}
dist: {'cityblock', 'euclidean', 'chebychev'}
Parameters
----------
mouse_day: a 170 * M numpy array,
column 0 : strain,
column 1: mouse,
other columns corresponding to feature avg/std of a mouse over 16 days
Returns
-------
method_distance: list
[method, dist]
"""
methods = ['ward', 'average', 'complete']
dists = ['cityblock', 'euclidean', 'chebychev']
method_dists = [(methods[i], dists[j]) for i in range(len(methods))
for j in range(len(dists))]
method_dists = [(method, dist) for method, dist in method_dists
if method != 'ward' or dist == 'euclidean']
cs = []
for method, dist in method_dists:
Z = linkage(mouse_day[:, 2:], method=method, metric=dist)
c, coph_dists = cophenet(Z, pdist(mouse_day[:, 2:]))
cs.append(c)
# determine the distance method
method, dist = method_dists[np.argmax(cs)]
return [method, dist]
def fit_hc(mouse_day_X, method, dist, num_clusters=range(2, 17)):
    """
    Returns a list of 2: [silhouettes, cluster_labels]
    silhouettes: list of float,
    cluster_labels: list of list,
        each sublist is the labels corresponding to the silhouette

    Parameters
    ----------
    mouse_day_X: a 170 * M numpy array,
        all columns corresponding to feature avg/std of a mouse over 16 days
    method: str,
        method of calculating distance between clusters
    dist: str,
        distance metric
    num_clusters: range
        range of number of clusters

    Returns
    -------
    A list of 2: [silhouettes, cluster_labels]
    """
    # sklearn spells this metric "chebyshev", unlike scipy's "chebychev".
    if dist == "chebychev":
        dist = "chebyshev"
    silhouettes = []
    cluster_labels = []
    for k in num_clusters:
        model = AgglomerativeClustering(linkage=method, n_clusters=k)
        model.fit(mouse_day_X)
        labels = model.labels_
        score = metrics.silhouette_score(mouse_day_X, labels, metric=dist)
        silhouettes.append(score)
        cluster_labels.append(list(labels))
    return [silhouettes, cluster_labels]
def get_optimal_fit_kmeans(mouse_X, num_clusters, raw=False):
    """
    Returns a list of 2: [silhouettes, cluster_labels]
    silhouettes: list of float,
    cluster_labels: list of list,
        each sublist is the labels corresponding to the silhouette

    Parameters
    ----------
    mouse_X: a 170 * M numpy array or 21131 * M numpy array,
        all columns corresponding to feature avg/std of a mouse over 16 days
        or the raw data without averaging over days
    num_clusters: range or a list or a numpy array
        range of number of clusters
    raw: a boolean with default is False
        False if using the 170 * M array

    Returns
    -------
    A list of 2: [silhouettes, cluster_labels]
    """
    # Subsample the silhouette computation for the large raw data set;
    # otherwise score on every row.
    sample_amount = 1000 if raw else mouse_X.shape[0]
    silhouettes = []
    cluster_labels = []
    for k in num_clusters:
        fitted = KMeans(n_clusters=k).fit(mouse_X)
        labels = fitted.labels_
        score = metrics.silhouette_score(
            mouse_X, labels, metric="euclidean", sample_size=sample_amount)
        silhouettes.append(score)
        cluster_labels.append(list(labels))
    return [silhouettes, cluster_labels]
def cluster_in_strain(labels_first, labels_second):
    """
    Returns a dictionary mapping each unique value of ``labels_second``
    to the count distribution of ``labels_first`` values within it.

    Pass cluster labels first to get, per strain, how clusters are
    distributed; pass strain labels first to get, per cluster, how
    strains are distributed.

    Parameters
    ----------
    labels_first: numpy arrary or list
        Integers identifying the cluster (or strain) of each mouse.
    labels_second: numpy arrary or list
        Integers (0-15) identifying the strain (or cluster) of each mouse.

    Returns
    -------
    count_data : dictionary
        Keys are the unique values of labels_second; each value is a list
        whose i-th entry counts occurrences of the i-th unique
        labels_first value inside that group.

    Examples
    --------
    >>> count_1 = cluster_in_strain([1,2,1,0,0],[0,1,1,2,1])
    """
    first = np.asarray(labels_first)
    second = np.asarray(labels_second)
    # Fix the category order once so every list lines up column-wise.
    categories = np.unique(first)
    count_data = {}
    for group in np.unique(second):
        members = first[second == group]
        count_data[group] = [(members == cat).sum() for cat in categories]
    return count_data
| bsd-2-clause |
tomlyscan/Ordenacao | sort.py | 1 | 2277 | import argparse
import pandas as pd
import bib
# Command-line interface: ``-f FILE`` supplies the numbers to sort and one
# of the flags below selects the sorting algorithm.  All algorithm flags
# share dest='sort_method', so when several are passed the last one wins.
parser = argparse.ArgumentParser()
parser.add_argument('-f', action='store', dest='list_integers', help='Arquivo com os números a serem ordenados')
parser.add_argument('-s', action='store_const', dest='sort_method', const='selection', help='Algoritmo selection sort selecionado')
parser.add_argument('-i', action='store_const', dest='sort_method', const='insertion', help='Algoritmo insertion sort selecionado')
parser.add_argument('-q', action='store_const', dest='sort_method', const='quick', help='Algoritmo quick sort selecionado')
parser.add_argument('-m', action='store_const', dest='sort_method', const='merge', help='Algoritmo merge sort selecionado')
parser.add_argument('-c', action='store_const', dest='sort_method', const='count', help='Algoritmo count sort selecionado')
parser.add_argument('-b', action='store_const', dest='sort_method', const='bucket', help='Algoritmo bucket sort selecionado')
parser.add_argument('-r', action='store_const', dest='sort_method', const='radix', help='Algoritmo radix sort selecionado')
parser.add_argument('-mhp', action='store_const', dest='sort_method', const='max_heap', help='Algoritmo heap máximo selecionado')
parser.add_argument('-hp', action='store_const', dest='sort_method', const='heap', help='Algoritmo heap sort selecionado')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
results = parser.parse_args()
# Read the whole CSV and flatten it to a 1-D array of values to sort.
data = pd.read_csv(results.list_integers, header=None)
arr = data.to_numpy().ravel()
# ``res`` stays empty if no (enabled) algorithm flag was given.
res = []
# Dispatch to the selected algorithm from the local ``bib`` module.  Every
# branch passes arr[1:], i.e. the first value in the file is skipped —
# presumably a header/count entry; TODO confirm against the input format.
if(results.sort_method == 'selection'):
    res = bib.selection_sort(arr[1:])
elif(results.sort_method == 'insertion'):
    res = bib.insertion_sort(arr[1:])
elif(results.sort_method == 'count'):
    res = bib.counting_sort(arr[1:])
elif(results.sort_method == 'radix'):
    res = bib.radix_sort(arr[1:])
elif(results.sort_method == 'bucket'):
    res = bib.bucket_sort(arr[1:])
# NOTE(review): the -m (merge) and -q (quick) flags are accepted by the
# parser but their branches are disabled, so selecting them prints nothing.
#elif(results.sort_method == 'merge'):
#    res = bib.merge_sort(arr[1:])
#elif(results.sort_method == 'quick'):
#    res = bib.quickSort(arr[1:], 0, len(arr)-1)
elif results.sort_method == 'max_heap':
    res = bib.build_max_heap(arr[1:])
elif results.sort_method == 'heap':
    res = bib.heap_sort(arr[1:])
for i in range(len(res)):
print(res[i]) | gpl-3.0 |
sumspr/scikit-learn | sklearn/tests/test_cross_validation.py | 27 | 41664 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # ``a`` only parameterizes score(); allow_nd permits >2-D inputs.
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        # Record the non-array fit params so tests can verify delivery.
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            # Callback receives the fitted mock for external assertions.
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            # fit params sliced per-fold must match the fold's sample count.
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level ``y`` fixture, not
            # the ``Y`` argument passed to fit — confirm that is intended.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            # Compared against the module-level P_sparse fixture: sparse fit
            # params must be passed through whole, not sliced per fold.
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # "Predict" by echoing the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Deterministic score depending only on the ``a`` parameter.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Module-level fixtures shared by the tests below.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# A single non-zero sample weight stored as a (10, 1) sparse column.
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# Five balanced classes: [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a (train, test) split is disjoint and, when n_samples is
    given, that it covers every index."""
    # Sets give much more informative assertion failure messages.
    train_set = set(train)
    test_set = set(test)
    # A sample must never appear on both sides of the split.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Together, train and test must cover the whole index range.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check every sample appears at least once in a test fold and that
    the CV iterator yields the expected number of splits."""
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)
    seen_test = set()
    n_iterations = 0
    for train_idx, test_idx in cv:
        check_valid_split(train_idx, test_idx, n_samples=n_samples)
        n_iterations += 1
        seen_test.update(test_idx)
    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        # Accumulated test indices must cover the whole dataset.
        assert_equal(seen_test, set(range(n_samples)))
def test_kfold_valueerrors():
    """KFold/StratifiedKFold must reject invalid n / n_folds values."""
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 2]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    """KFold test folds must jointly cover all sample indices."""
    # Check all indices are returned in the test folds
    kf = cval.KFold(300, 3)
    check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
    # Check all indices are returned in the test folds even when equal-sized
    # folds are not possible
    kf = cval.KFold(17, 3)
    check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
    """Without shuffling, KFold must keep samples in original order."""
    # Manually check that KFold preserves the data ordering on toy datasets
    splits = iter(cval.KFold(4, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])
    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])
    splits = iter(cval.KFold(5, 2))
    train, test = next(splits)
    # With 5 samples and 2 folds, the first fold gets the extra sample.
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])
    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    """Unshuffled StratifiedKFold should keep ordering where possible."""
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    """Every stratified fold must preserve the overall label ratios."""
    # Check that stratified kfold preserves label ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    # Deliberately imbalanced labels: 10% / 89% / 1%.
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
                                2)
            assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
                                2)
            assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
                                2)
            assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    """KFold fold sizes may differ by at most one sample."""
    # Check that KFold returns folds with balanced sizes
    for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
        sizes = []
        for _, test in kf:
            sizes.append(len(test))
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        # Fold sizes must add up to the total sample count.
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    """StratifiedKFold fold sizes may differ by at most one sample."""
    # Check that KFold returns folds with balanced sizes (only when
    # stratification is possible)
    # Repeat with shuffling turned off and on
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
                    for i in range(11, 17)]:
            sizes = []
            for _, test in skf:
                sizes.append(len(test))
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            # Fold sizes must add up to the total sample count.
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    """Shuffled KFold must permute indices yet still cover every sample."""
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)
    all_folds = None
    for train, test in kf:
        # NOTE(review): these reference arrays have different lengths than
        # ind[train] (100 vs 200); old numpy reduced such mismatched
        # comparisons to a scalar — confirm intent on modern numpy.
        sorted_array = np.arange(100)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(101, 200)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(201, 300)
        assert_true(np.any(sorted_array != ind[train]))
        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))
    all_folds.sort()
    # Every index must appear exactly once across the test folds.
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    """Different random_state values must yield different stratified folds."""
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    for (_, test0), (_, test1) in zip(kf0, kf1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Non-shuffled (Stratified)KFold should expose sample dependency."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than than the non
    # shuffling variant (around 0.86).
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    """Equivalent test_size specs (float, int, np/py ints) must match."""
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
            # All four splitters must produce identical train indices...
            assert_array_equal(t1[0], t2[0])
            assert_array_equal(t2[0], t3[0])
            assert_array_equal(t3[0], t4[0])
            # ...and identical test indices.
            assert_array_equal(t1[1], t2[1])
            assert_array_equal(t2[1], t3[1])
            assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    """StratifiedShuffleSplit must reject infeasible class/size combos."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Every stratified shuffle split must keep class proportions."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            # Every class must be represented on both sides of the split.
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            # Train and test indices must be disjoint.
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """StratifiedShuffleSplit should draw every index with equal chance."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # ``n_splits`` is the enclosing function's counter; closures bind
        # lazily, so it holds its final value when this helper is called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)
        # Tally, per sample index, how often it lands in train vs test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)
        # The last drawn split is representative for the size checks below.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    """PredefinedSplit built from KFold assignments must reproduce it."""
    # Check that PredefinedSplit can reproduce a split generated by Kfold.
    folds = -1 * np.ones(10)
    kf_train = []
    kf_test = []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        # Record each sample's fold number for PredefinedSplit.
        folds[test_ind] = i
    ps_train = []
    ps_test = []
    ps = cval.PredefinedSplit(folds)
    for train_ind, test_ind in ps:
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
    """Label-out CV must snapshot labels at construction time."""
    # Check that LeaveOneLabelOut and LeavePLabelOut work normally if
    # the labels variable is changed before calling __iter__
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # Mutate the array the *_changing iterators were built from.
    labels_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    """Smoke-test cross_val_score with mock classifiers and varied inputs."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # Unknown scorer names must raise.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    # A classifier refusing nd input must surface its ValueError.
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """cross_val_score must pass dataframe/series inputs through intact."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Also exercise real pandas types when pandas is installed.
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score must give identical scores for an index-based CV
    iterator and the equivalent boolean-mask-based iterator."""
    # test that cross_val_score works with boolean masks
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        # Convert each (train, test) index pair into boolean masks.
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: the integer index arrays (train, test) were appended
        # here, so the boolean masks built above were never exercised and
        # the comparison below was trivially index-vs-index.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """A precomputed linear kernel must score like the linear-kernel SVM."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    # Gram matrix of the linear kernel.
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params of every supported type must reach the estimator's fit."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()

    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)

    # Array-like params get sliced per fold; the others pass through whole
    # (MockClassifier.fit asserts both behaviours).
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer must be invoked once per CV fold."""
    clf = MockClassifier()
    _score_func_args = []

    def score_func(y_test, y_predict):
        # Record every invocation so we can count the folds afterwards.
        _score_func_args.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # Default 3-fold CV: the scorer must have been called three times.
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """An object without a fit method must raise TypeError."""
    class BrokenEstimator:
        pass
    assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
    """train_test_split must validate its size arguments and inputs."""
    assert_raises(ValueError, cval.train_test_split)
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # train_size + test_size > 1 is invalid, for floats and np floats alike.
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # Absolute sizes exceeding the number of samples are invalid.
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Inputs of mismatched lengths are rejected.
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """Check splitting of arrays, sparse matrices, lists and nd inputs."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    # Default split is 75/25, so 7 train / 3 test samples on axis 0.
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
verdverm/pypge | test/test_odeint.py | 1 | 1852 | #!/usr/bin/env python
"""
Program to plot the motion of a "springy pendulum".
(kindly taken from: http://julianoliver.com/share/free-science-books/comp-phys-python.pdf [page 102-103])
We actually have FOUR parameters to track, here:
L, L dot, theta, and theta dot.
So instead of the usual Nx2 array, make it Nx4.
Each 4-element row will be used for the state of
the system at one instant, and each instant is
separated by time dt. I'll use the order given above.
"""
import numpy as np
import scipy
from scipy.integrate import odeint
## Nx4
N = 1000 # number of steps to take
y = np.zeros([4])
Lo = 1.0 # unstretched spring length
L = 1.0 # Initial stretch of spring
vo = 0.0 # initial velocity
thetao = 0.3 # radians
omegao = 0.0 # initial angular velocity
y[0] = L # set initial state
y[1] = vo
y[2] = thetao
y[3] = omegao
time = np.linspace(0, 25, N)
k = 3.5 # spring constant, in N/m
m = 0.2 # mass, in kg
gravity = 9.8 # g, in m/s^2
def springpendulum(y, time):
"""
This defines the set of differential equations
we are solving. Note that there are more than
just the usual two derivatives!
"""
g0 = y[1]
g1 = (Lo+y[0])*y[3]*y[3] - k/m*y[0] + gravity*np.cos(y[2])
g2 = y[3]
g3 = -(gravity*np.sin(y[2]) + 2.0*y[1]*y[3]) / (Lo + y[0])
return np.array([g0,g1,g2,g3])
# Now we do the calculations.
answer = scipy.integrate.odeint(springpendulum, y, time)
# Now graph the results.
# rather than graph in terms of t, I'm going
# to graph the track the mass takes in 2D.
# This will require that I change L,theta data
# to x,y data.
xdata = (Lo + answer[:,0])*np.sin(answer[:,2])
ydata = -(Lo + answer[:,0])*np.cos(answer[:,2])
import os
if os.getenv("TRAVIS", "false") != "true":
import matplotlib.pyplot as plt
plt.plot(xdata, ydata, 'r-')
plt.xlabel("Horizontal position")
plt.ylabel("Vertical position")
# plt.show()
| mit |
StuartLittlefair/astropy | astropy/visualization/wcsaxes/patches.py | 3 | 6772 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
__all__ = ['Quadrangle', 'SphericalCircle']
# Monkey-patch the docs to fix CapStyle and JoinStyle subs.
# TODO! delete when upstream fix matplotlib/matplotlib#19839
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``")
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``")
Polygon.set_capstyle.__doc__ = Polygon.set_capstyle.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``")
Polygon.set_joinstyle.__doc__ = Polygon.set_joinstyle.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``")
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
m2 = rotation_matrix(-lon0, axis='z')
transform_matrix = matrix_product(m2, m1)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
coordinates on a sphere. Here we assume that latitude goes from -90 to +90
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity`
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
radius : `~astropy.units.Quantity`
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
class Quadrangle(Polygon):
"""
Create a patch representing a latitude-longitude quadrangle.
The edges of the quadrangle lie on two lines of constant longitude and two
lines of constant latitude (or the equivalent component names in the
coordinate frame of interest, such as right ascension and declination).
Note that lines of constant latitude are not great circles.
Unlike `matplotlib.patches.Rectangle`, the edges of this patch will render
as curved lines if appropriate for the WCS transformation.
Parameters
----------
anchor : tuple or `~astropy.units.Quantity`
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
width : `~astropy.units.Quantity`
The width of the quadrangle in longitude (or, e.g., right ascension)
height : `~astropy.units.Quantity`
The height of the quadrangle in latitude (or, e.g., declination)
resolution : int, optional
The number of points that make up each side of the quadrangle -
increase this to get a smoother quadrangle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, anchor, width, height, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = u.Quantity(anchor).to_value(vertex_unit)
# Convert the quadrangle dimensions to the appropriate units
width = width.to_value(vertex_unit)
height = height.to_value(vertex_unit)
# Create progressions in longitude and latitude
lon_seq = longitude + np.linspace(0, width, resolution + 1)
lat_seq = latitude + np.linspace(0, height, resolution + 1)
# Trace the path of the quadrangle
lon = np.concatenate([lon_seq[:-1],
np.repeat(lon_seq[-1], resolution),
np.flip(lon_seq[1:]),
np.repeat(lon_seq[0], resolution)])
lat = np.concatenate([np.repeat(lat_seq[0], resolution),
lat_seq[:-1],
np.repeat(lat_seq[-1], resolution),
np.flip(lat_seq[1:])])
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
| bsd-3-clause |
GGoussar/scikit-image | doc/examples/filters/plot_entropy.py | 9 | 2234 | """
=======
Entropy
=======
In information theory, information entropy is the log-base-2 of the number of
possible outcomes for a message.
For an image, local entropy is related to the complexity contained in a given
neighborhood, typically defined by a structuring element. The entropy filter can
detect subtle variations in the local gray level distribution.
In the first example, the image is composed of two surfaces with two slightly
different distributions. The image has a uniform random distribution in the
range [-14, +14] in the middle of the image and a uniform random distribution in
the range [-15, 15] at the image borders, both centered at a gray value of 128.
To detect the central square, we compute the local entropy measure using a
circular structuring element of a radius big enough to capture the local gray
level distribution. The second example shows how to detect texture in the camera
image using a smaller structuring element.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
# First example: object detection.
noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30
noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
noise_mask).astype(np.uint8)
img = noise + 128
entr_img = entropy(img, disk(10))
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 3))
ax0.imshow(noise_mask, cmap=plt.cm.gray)
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img)
ax2.set_xlabel("Local entropy")
fig.tight_layout()
# Second example: texture detection.
image = img_as_ubyte(data.camera())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4), sharex=True,
sharey=True,
subplot_kw={"adjustable": "box-forced"})
img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)
img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.gray)
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)
fig.tight_layout()
plt.show()
| bsd-3-clause |
alistairlow/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
  def testIrisInputFn(self):
    """fit/evaluate driven by an input_fn; predict still accepts arrays."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn, steps=100)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1)
    # predict() must yield exactly one prediction per input example.
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])
  def testIrisInputFnLabelsDict(self):
    """evaluate() with dict labels requires a MetricSpec naming label_key."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
    _ = est.evaluate(
        input_fn=iris_input_fn_labels_dict,
        steps=1,
        metrics={
            'accuracy':
                metric_spec.MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='class',
                    label_key='labels')
        })
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])
  def testTrainInputFn(self):
    """fit() and evaluate() both accept an input_fn instead of arrays."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_eval_fn, steps=1)
  def testPredictInputFn(self):
    """predict() with a one-epoch input_fn yields one output per example."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    # num_epochs=1 makes the input pipeline finite so predict() terminates.
    input_fn = functools.partial(boston_input_fn, num_epochs=1)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
  def testPredictInputFnWithQueue(self):
    """A queue-backed input_fn with two epochs yields each example twice."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
    output = list(est.predict(input_fn=input_fn))
    # Two passes over the data, hence twice as many predictions.
    self.assertEqual(len(output), boston.target.shape[0] * 2)
  def testPredictConstInputFn(self):
    """predict() works with an input_fn built from constant tensors."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    def input_fn():
      # The whole dataset as a single constant batch (no queues/epochs).
      features = array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
      labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
      return features, labels
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
spallavolu/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Discrete sub-sampled Ricker (Mexican hat) wavelet.

    Parameters
    ----------
    resolution : int
        Number of sample points; the wavelet is evaluated on
        ``np.linspace(0, resolution - 1, resolution)``.
    center : float
        Location of the wavelet's peak.
    width : float
        Width parameter ``a`` of the wavelet.

    Returns
    -------
    ndarray of shape (resolution,)
        ``A * (1 - ((x - center)/width)**2) * exp(-(x - center)**2 /
        (2 * width**2))`` with the standard normalisation
        ``A = 2 / (sqrt(3 * width) * pi**(1/4))``.
    """
    x = np.linspace(0, resolution - 1, resolution)
    # BUG FIX: the normalisation uses pi**(1/4).  The previous expression
    # ``np.pi ** 1 / 4`` parsed as (pi**1)/4 by operator precedence and
    # scaled every atom by the wrong constant.
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
    return x
def ricker_matrix(width, resolution, n_components):
    """Dictionary of Ricker (Mexican hat) wavelets.

    Builds ``n_components`` atoms of a fixed ``width`` whose peaks are
    evenly spaced over ``[0, resolution - 1]``; every atom (row) is then
    L2-normalised to unit Euclidean norm.
    """
    peak_positions = np.linspace(0, resolution - 1, n_components)
    atoms = np.empty((n_components, resolution))
    for row, peak in enumerate(peak_positions):
        atoms[row] = ricker_function(resolution, peak, width)
    # Normalise each row to unit L2 norm.
    row_norms = np.sqrt(np.sum(atoms ** 2, axis=1))
    atoms /= row_norms[:, np.newaxis]
    return atoms
resolution = 1024
subsampling = 3  # subsampling factor
width = 100
# BUG FIX (Python 3 compatibility): use floor division so n_components is an
# int.  True division produces a float, which makes the
# ``np.empty((n_components, resolution))`` allocation in ricker_matrix fail.
# The numeric value is unchanged (1024 // 3 == 341, as under Python 2).
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
# ``n_components // 5`` replaces ``np.floor(n_components / 5)`` (same value,
# but an int instead of a float) for the same reason as above.
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
                for w in (10, 50, 100, 500, 1000))]
# Generate a signal: a piecewise-constant step (3 on the first quarter,
# -1 elsewhere).
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    pl.subplot(1, 2, subplot + 1)
    pl.title('Sparse coding against %s dictionary' % title)
    pl.plot(y, ls='dotted', label='Original signal')
    # Do a wavelet approximation
    for title, algo, alpha, n_nonzero in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y)
        density = len(np.flatnonzero(x))
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
                % (title, density, squared_error))
    # Soft thresholding debiasing: re-fit the selected atoms by least squares.
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y)
    _, idx = np.where(x != 0)
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    pl.plot(x,
            label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
            (len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
eleweek/dataisbeautiful | rbugs_statistics.py | 1 | 2922 | import os
from datetime import datetime
from datetime import date
from collections import defaultdict
import json
import praw
from praw.helpers import submissions_between
import seaborn as sns
import matplotlib.patches as mpatches
def get_flair_stats():
    """Scrape /r/bugs and count submissions per (year, month) per flair.

    Returns a ``defaultdict`` mapping ``(year, month)`` tuples to
    ``defaultdict`` counters keyed by flair text.  Requires the
    ``REDDIT_USERNAME`` environment variable for the user agent.
    NOTE(review): this file is Python 2 (print statement below).
    """
    user = os.environ['REDDIT_USERNAME']
    user_agent = 'Calculating ignored bugs by {}'.format(user)
    r = praw.Reddit(user_agent)
    flair_stats = defaultdict(lambda: defaultdict(lambda: 0))
    for s in submissions_between(r, 'bugs', lowest_timestamp=1400000000):
        created = datetime.utcfromtimestamp(s.created_utc)
        month = (created.year, created.month)
        # They started to add flairs since January, 2015
        # NOTE(review): ``break`` assumes submissions_between yields posts
        # newest-first — verify against the praw helper's ordering.
        if month < (2015, 2):
            break
        # Current month has incomplete data
        if month == (date.today().year, date.today().month):
            continue
        # Submissions without flairs seems to be mainly duplicate submissions removed by mods
        # They are not viewable in the interface, so we aren't counting them
        if not s.link_flair_text:
            print "IGNORING POST WITHOUT A FLAIR", s.permalink, s.title
            continue
        flair_stats[month][s.link_flair_text] += 1
    return flair_stats
def convert_month_keys_to_strings(flair_stats):
    """Convert ``(year, month)`` tuple keys into ``"YYYY.MM"`` strings.

    JSON object keys must be strings, so the ``(2015, 3)``-style keys
    produced by ``get_flair_stats`` are rewritten as ``"2015.03"`` before
    being dumped to disk; zero-padding the month keeps the keys sortable.

    :param flair_stats: dict mapping ``(year, month)`` tuples to values
    :return: dict with the same values under string keys
    """
    # ``.items()`` (rather than the Python-2-only ``.iteritems()``) keeps
    # this working on both Python 2 and Python 3; ``{:02d}`` replaces the
    # manual ``m > 9`` zero-padding branch with identical output.
    return {"{}.{:02d}".format(y, m): v
            for ((y, m), v) in flair_stats.items()}
def do_plot(flair_stats, filename):
    """Plot total vs. 'new'-flaired (ignored) bug counts per month.

    :param flair_stats: dict mapping month keys to {flair: count} dicts
    :param filename: path the PNG figure is saved to
    """
    months = []
    new_flaired = []
    total = []
    # Sort by month key so the x axis is chronological.
    for month, month_stats in sorted(flair_stats.items()):
        total.append(sum(month_stats.values()))
        # A 'new' flair means the report was never triaged, i.e. ignored.
        new_flaired.append(month_stats['new'])
        months.append(month)
    sns.set_style('whitegrid')
    total_plot_color = sns.xkcd_rgb["denim blue"]
    ignored_plot_color = sns.xkcd_rgb["orange red"]
    total_plot = sns.pointplot(x=months, y=total, color=total_plot_color)
    sns.pointplot(x=months, y=new_flaired, color=ignored_plot_color)
    # Proxy patches for the legend (pointplot draws no labelled artists).
    total_patch = mpatches.Patch(color=total_plot_color)
    ignored_patch = mpatches.Patch(color=ignored_plot_color)
    total_plot.set(ylabel="Number of bugreports", xlabel="Month")
    total_plot.set_title('/r/bugs statistics by month:\nReddit admins consistently ignore half of bugreports', y=1.02)
    sns.plt.legend([total_patch, ignored_patch], ['Total number of bugreports',
                                                  'Number of ignored bugreports (submissions with "new" flair)'],
                   loc="lower left")
    sns.plt.savefig(filename)
# Cache the scraped stats on disk so re-runs don't hit the Reddit API again.
if not os.path.exists("rbugs_flair_stats.json"):
    flair_stats = convert_month_keys_to_strings(get_flair_stats())
    with open("rbugs_flair_stats.json", "w") as flair_stats_file:
        json.dump(flair_stats, flair_stats_file)
else:
    with open("rbugs_flair_stats.json") as flair_stats_file:
        flair_stats = json.load(flair_stats_file)
do_plot(flair_stats, "rbugs_statistics.png")
| mit |
nitlev/deepforest | deepforest/models.py | 1 | 3738 | import numpy as np
from sklearn import clone
from sklearn.model_selection import KFold
class Models(object):
    """
    A class abstracting away a bunch of models. predict_proba and predict
    return all the individual models' outputs stacked on a new trailing
    dimension.
    """

    def __init__(self, models):
        self._models = models

    def fit(self, X, y):
        """
        Fits all the underlying models
        :param X: A (n_samples, n_features) array
        :param y: The input target (n_samples,)
        :return: a fitted version of self
        """
        for model in self._models:
            model.fit(X, y)
        return self

    def predict_proba(self, X):
        """
        Returns an array of predictions probabilities. All the predictions of
        the underlying models are stacked on a new dimension.
        :param X: A (n_samples, n_features) array
        :return: A (n_samples, n_classes, n_models) dimensions array
        """
        return np.stack(
            [model.predict_proba(X) for model in self._models],
            axis=-1
        )

    def predict(self, X):
        """
        Returns an array of predictions. All the predictions of the underlying
        models are stacked on a new dimension
        :param X: A (n_samples, n_features) array
        :return: A (n_samples, n_models) dimensions array
        """
        # BUG FIX: this previously delegated to ``model.predict_proba``,
        # returning probabilities of shape (n_samples, n_classes, n_models)
        # instead of the documented (n_samples, n_models) hard predictions.
        return np.stack([model.predict(X) for model in self._models],
                        axis=-1)

    def __getitem__(self, item):
        """
        Models indexing method
        :param item: int
        :return: model, as passed to constructor
        """
        return self._models[item]
class CrossValidatedModel(object):
    """
    An ensemble of ``n_splits`` clones of a base model.

    NOTE(review): despite the name, ``fit`` currently trains every clone on
    the *full* input data; only ``predict_proba`` involves a KFold split,
    asking clone ``i`` to predict on the training part of fold ``i`` and
    averaging the per-fold probabilities per sample — confirm this is the
    intended scheme before relying on it as cross-validation.
    """
    def __init__(self, model, n_splits=3):
        self.base_model = model
        self._models = [clone(model) for _ in range(n_splits)]
        self.n_splits = n_splits
    def fit(self, X, y):
        """
        Fits all the underlying model clones (each on the full data).
        :param X: A (n_samples, n_features) array
        :param y: The input target (n_samples,)
        :return: a fitted version of self
        """
        for model in self._models:
            model.fit(X, y)
        return self
    def predict_proba(self, X):
        """
        Returns the per-sample mean of the clones' predicted probabilities.
        The models must already be fitted (``classes_`` is read below).
        :param X: A (n_samples, n_features) array
        :return: A (n_samples, n_classes) array (mean over splits)
        """
        # Start from NaN so folds a sample did not appear in are ignored by
        # the nanmean below.
        predictions = np.full(
            (len(X), len(self._models[0].classes_), self.n_splits),
            np.nan
        )
        kfold = KFold(n_splits=self.n_splits)
        for i, (train_index, _) in enumerate(kfold.split(X)):
            X_train = X[train_index]
            model = self._models[i]
            prediction = model.predict_proba(X_train)
            predictions[train_index, :, i] = prediction
        # Each sample lands in the training part of n_splits - 1 folds, so
        # this averages over those models' probabilities.
        mean_prediction = np.nanmean(predictions, axis=-1)
        return mean_prediction
    def predict(self, X):
        """
        Returns the argmax class index of the averaged probabilities.
        :param X: A (n_samples, n_features) array
        :return: A (n_samples,) array of class indices
        """
        prediction_proba = self.predict_proba(X)
        return np.argmax(prediction_proba, axis=-1)
| mit |
ChanderG/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
# NOTE(review): the ``sklearn.lda`` module was deprecated and later removed
# (modern scikit-learn exposes LinearDiscriminantAnalysis in
# ``sklearn.discriminant_analysis``) — this script targets an old release.
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
# Unsupervised projection onto the 2 leading principal components.
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
# Supervised projection maximising between-class separation.
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))
plt.figure()
# One colour per class: red/green/blue for the three Iris species.
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/array/numpy_compat.py | 6 | 13707 | from __future__ import absolute_import, division, print_function
from ..compatibility import builtins
import numpy as np
import warnings
# numpy < 1.7 has no ``isclose``; install a stub that fails loudly at call
# time (rather than at import time) so the rest of the module stays usable.
try:
    isclose = np.isclose
except AttributeError:
    def isclose(*args, **kwargs):
        raise RuntimeError("You need numpy version 1.7 or greater to use "
                           "isclose.")
# numpy < 1.8 has no ``full``; emulate it via multiply-by-ones.  The
# ``order`` kwarg cannot be emulated this way and is rejected explicitly.
try:
    full = np.full
except AttributeError:
    def full(shape, fill_value, dtype=None, order=None):
        """Our implementation of numpy.full because your numpy is old."""
        if order is not None:
            raise NotImplementedError("`order` kwarg is not supported upgrade "
                                      "to Numpy 1.8 or greater for support.")
        return np.multiply(fill_value, np.ones(shape, dtype=dtype),
                           dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
# Probe at import time whether np.divide honours the dtype/casting kwargs
# correctly on this numpy build (see numpy/numpy#3484); fall back to a
# divide-then-astype wrapper when it does not.
try:
    with warnings.catch_warnings():
        if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                            np.divide(.4, 1, casting="unsafe", dtype=np.float)) or
                not np.allclose(np.divide(1, .5, dtype='i8'), 2) or
                not np.allclose(np.divide(.4, 1), .4)):
            raise TypeError('Divide not working with dtype: '
                            'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide
except TypeError:
    # Divide with dtype doesn't work on Python 3
    def divide(x1, x2, out=None, dtype=None):
        """Implementation of numpy.divide that works with dtype kwarg.
        Temporary compatibility fix for a bug in numpy's version. See
        https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
        # Divide first, then cast to the requested dtype.
        x = np.divide(x1, x2, out)
        if dtype is not None:
            x = x.astype(dtype)
        return x
# functions copied from numpy
try:
from numpy import broadcast_to, nanprod, nancumsum, nancumprod
except ImportError: # pragma: no cover
# these functions should arrive in numpy v1.10 to v1.12. Until then,
# they are duplicated here
# See https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
    def _maybe_view_as_subclass(original_array, new_array):
        """Re-wrap ``new_array`` as the same ndarray subclass as
        ``original_array`` (backported from NumPy; see license note above)."""
        if type(original_array) is not type(new_array):
            # if input was an ndarray subclass and subclasses were OK,
            # then view the result as that subclass.
            new_array = new_array.view(type=type(original_array))
        # Since we have done something akin to a view from original_array, we
        # should let the subclass finalize (if it has it implemented, i.e., is
        # not None).
        if new_array.__array_finalize__:
            new_array.__array_finalize__(original_array)
        return new_array
    def _broadcast_to(array, shape, subok, readonly):
        """Backport of NumPy's internal broadcast helper: return a (possibly
        writable) view of ``array`` broadcast to ``shape`` via an nditer
        view (backported from NumPy; see license note above)."""
        shape = tuple(shape) if np.iterable(shape) else (shape,)
        array = np.array(array, copy=False, subok=subok)
        if not shape and array.shape:
            raise ValueError('cannot broadcast a non-scalar to a scalar array')
        if builtins.any(size < 0 for size in shape):
            raise ValueError('all elements of broadcast shape must be non-'
                             'negative')
        # The nditer itview is a zero-copy broadcast view onto ``array``.
        broadcast = np.nditer(
            (array,), flags=['multi_index', 'zerosize_ok', 'refs_ok'],
            op_flags=['readonly'], itershape=shape, order='C').itviews[0]
        result = _maybe_view_as_subclass(array, broadcast)
        if not readonly and array.flags.writeable:
            result.flags.writeable = True
        return result
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple
The shape of the desired array.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3)) # doctest: +SKIP
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
return _broadcast_to(array, shape, subok=subok, readonly=True)
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
def nanprod(a, axis=None, dtype=None, out=None, keepdims=0):
"""
Return the product of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
One is returned for slices that are all-NaN or empty.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
the product of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
Returns
-------
y : ndarray or numpy scalar
See Also
--------
numpy.prod : Product across array propagating NaNs.
isnan : Show which elements are NaN.
Notes
-----
Numpy integer arithmetic is modular. If the size of a product exceeds
the size of an integer accumulator, its value will wrap around and the
result will be incorrect. Specifying ``dtype=double`` can alleviate
that problem.
Examples
--------
>>> np.nanprod(1)
1
>>> np.nanprod([1])
1
>>> np.nanprod([1, np.nan])
1.0
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanprod(a)
6.0
>>> np.nanprod(a, axis=0)
array([ 3., 2.])
"""
a, mask = _replace_nan(a, 1)
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are
encountered and leading NaNs are replaced by zeros.
Zeros are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
nancumsum : ndarray.
A new array holding the result is returned unless `out` is
specified, in which it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.
See Also
--------
numpy.cumsum : Cumulative sum across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumsum(1) #doctest: +SKIP
array([1])
>>> np.nancumsum([1]) #doctest: +SKIP
array([1])
>>> np.nancumsum([1, np.nan]) #doctest: +SKIP
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumsum(a) #doctest: +SKIP
array([ 1., 3., 6., 6.])
>>> np.nancumsum(a, axis=0) #doctest: +SKIP
array([[ 1., 2.],
[ 4., 2.]])
>>> np.nancumsum(a, axis=1) #doctest: +SKIP
array([[ 1., 3.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 0)
return np.cumsum(a, axis=axis, dtype=dtype, out=out)
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis treating Not a
Numbers (NaNs) as one. The cumulative product does not change when NaNs are
encountered and leading NaNs are replaced by ones.
Ones are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
nancumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.
See Also
--------
numpy.cumprod : Cumulative product across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumprod(1) #doctest: +SKIP
array([1])
>>> np.nancumprod([1]) #doctest: +SKIP
array([1])
>>> np.nancumprod([1, np.nan]) #doctest: +SKIP
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a) #doctest: +SKIP
array([ 1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0) #doctest: +SKIP
array([[ 1., 2.],
[ 3., 2.]])
>>> np.nancumprod(a, axis=1) #doctest: +SKIP
array([[ 1., 2.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 1)
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
| mit |
lbdreyer/iris | lib/iris/tests/test_plot.py | 3 | 31896 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import cf_units
import numpy as np
import iris
import iris.coords as coords
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
@tests.skip_data
def simple_cube():
    """Return a 2d (time, grid_longitude) slice of the realistic 4d cube."""
    cube = iris.tests.stock.realistic_4d()
    cube = cube[:, 0, 0, :]
    # Bounds are needed so cell-based plot routines (e.g. pcolor) can draw.
    cube.coord("time").guess_bounds()
    return cube
@tests.skip_plot
class TestSimple(tests.GraphicsTest):
    """Smoke-test a point-based (contourf) and bounds-based (pcolor) plot."""
    def test_points(self):
        cube = simple_cube()
        qplt.contourf(cube)
        self.check_graphic()
    def test_bounds(self):
        cube = simple_cube()
        qplt.pcolor(cube)
        self.check_graphic()
@tests.skip_plot
class TestMissingCoord(tests.GraphicsTest):
    """Plotting must still work when one or both dim coords are removed."""
    def _check(self, cube):
        # Both a point-based and a bounds-based plot must succeed.
        qplt.contourf(cube)
        self.check_graphic()
        qplt.pcolor(cube)
        self.check_graphic()
    def test_no_u(self):
        # Missing the horizontal coordinate.
        cube = simple_cube()
        cube.remove_coord("grid_longitude")
        self._check(cube)
    def test_no_v(self):
        # Missing the vertical coordinate.
        cube = simple_cube()
        cube.remove_coord("time")
        self._check(cube)
    def test_none(self):
        # Missing both coordinates.
        cube = simple_cube()
        cube.remove_coord("grid_longitude")
        cube.remove_coord("time")
        self._check(cube)
@tests.skip_data
@tests.skip_plot
class TestMissingCS(tests.GraphicsTest):
    """A map plot should still draw when the coord_system is set to None."""
    # NOTE(review): @tests.skip_data appears on both the class and the
    # method — the duplication looks redundant; confirm before removing.
    @tests.skip_data
    def test_missing_cs(self):
        cube = tests.stock.simple_pp()
        cube.coord("latitude").coord_system = None
        cube.coord("longitude").coord_system = None
        qplt.contourf(cube)
        qplt.plt.gca().coastlines("110m")
        self.check_graphic()
@tests.skip_plot
@tests.skip_data
class TestHybridHeight(tests.GraphicsTest):
    """Plotting a hybrid-height cube against several vertical coordinates."""
    def setUp(self):
        super().setUp()
        # A 2d (model level, grid_longitude) slice of the realistic 4d cube.
        self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
    def _check(self, plt_method, test_altitude=True):
        # Default coords, then explicit coord pairs in both orders, and
        # (optionally) against the derived 'altitude' coordinate.
        plt_method(self.cube)
        self.check_graphic()
        plt_method(self.cube, coords=["level_height", "grid_longitude"])
        self.check_graphic()
        plt_method(self.cube, coords=["grid_longitude", "level_height"])
        self.check_graphic()
        if test_altitude:
            plt_method(self.cube, coords=["grid_longitude", "altitude"])
            self.check_graphic()
            plt_method(self.cube, coords=["altitude", "grid_longitude"])
            self.check_graphic()
    def test_points(self):
        self._check(qplt.contourf)
    def test_bounds(self):
        # 'altitude' is excluded: bounds plotting against it is unsupported.
        self._check(qplt.pcolor, test_altitude=False)
    def test_orography(self):
        qplt.contourf(self.cube)
        iplt.orography_at_points(self.cube)
        iplt.points(self.cube)
        self.check_graphic()
        coords = ["altitude", "grid_longitude"]
        qplt.contourf(self.cube, coords=coords)
        iplt.orography_at_points(self.cube, coords=coords)
        iplt.points(self.cube, coords=coords)
        self.check_graphic()
        # TODO: Test bounds once they are supported.
        with self.assertRaises(NotImplementedError):
            qplt.pcolor(self.cube)
            iplt.orography_at_bounds(self.cube)
            iplt.outline(self.cube)
            self.check_graphic()
@tests.skip_plot
@tests.skip_data
class Test1dPlotMultiArgs(tests.GraphicsTest):
    """Exercise every cube/coordinate argument combination of iplt.plot."""
    # tests for iris.plot using multi-argument calling convention
    def setUp(self):
        super().setUp()
        self.cube1d = _load_4d_testcube()[0, :, 0, 0]
        self.draw_method = iplt.plot
    def test_cube(self):
        # just plot a cube against its dim coord
        self.draw_method(self.cube1d)  # altitude vs temp
        self.check_graphic()
    def test_coord(self):
        # plot the altitude coordinate
        self.draw_method(self.cube1d.coord("altitude"))
        self.check_graphic()
    def test_coord_cube(self):
        # plot temperature against sigma
        self.draw_method(self.cube1d.coord("sigma"), self.cube1d)
        self.check_graphic()
    def test_cube_coord(self):
        # plot a vertical profile of temperature
        self.draw_method(self.cube1d, self.cube1d.coord("altitude"))
        self.check_graphic()
    def test_coord_coord(self):
        # plot two coordinates that are not mappable
        self.draw_method(
            self.cube1d.coord("sigma"), self.cube1d.coord("altitude")
        )
        self.check_graphic()
    def test_coord_coord_map(self):
        # plot lat-lon aux coordinates of a trajectory, which draws a map
        lon = iris.coords.AuxCoord(
            [0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
            standard_name="longitude",
            units="degrees_north",
        )
        lat = iris.coords.AuxCoord(
            [45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
            standard_name="latitude",
            units="degrees_north",
        )
        self.draw_method(lon, lat)
        plt.gca().coastlines("110m")
        self.check_graphic()
    def test_cube_cube(self):
        # plot two phenomena against eachother, in this case just dummy data
        cube1 = self.cube1d.copy()
        cube2 = self.cube1d.copy()
        cube1.rename("some phenomenon")
        cube2.rename("some other phenomenon")
        cube1.units = cf_units.Unit("no_unit")
        cube2.units = cf_units.Unit("no_unit")
        cube1.data[:] = np.linspace(0, 1, 7)
        cube2.data[:] = np.exp(cube1.data)
        self.draw_method(cube1, cube2)
        self.check_graphic()
    def test_incompatible_objects(self):
        # incompatible objects (not the same length) should raise an error
        with self.assertRaises(ValueError):
            self.draw_method(self.cube1d.coord("time"), (self.cube1d))
    # NOTE(review): method name has a typo ("multimidmensional") — renaming
    # would change the discovered test id, so it is left as-is here.
    def test_multimidmensional(self):
        # multidimensional cubes are not allowed
        cube = _load_4d_testcube()[0, :, :, 0]
        with self.assertRaises(ValueError):
            self.draw_method(cube)
    def test_not_cube_or_coord(self):
        # inputs must be cubes or coordinates, otherwise an error should be
        # raised
        xdim = np.arange(self.cube1d.shape[0])
        with self.assertRaises(TypeError):
            self.draw_method(xdim, self.cube1d)
    def test_plot_old_coords_kwarg(self):
        # Coords used to be a valid kwarg to plot, but it was deprecated and
        # we are maintaining a reasonable exception, check that it is raised
        # here.
        with self.assertRaises(TypeError):
            self.draw_method(self.cube1d, coords=None)
@tests.skip_plot
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
    """Re-run all Test1dPlotMultiArgs cases with qplt.plot instead."""
    # tests for iris.plot using multi-argument calling convention
    def setUp(self):
        # Call GraphicsTest.setUp directly (not super().setUp()) —
        # presumably to bypass the parent setUp, whose attribute
        # assignments are fully replaced below; confirm intent.
        tests.GraphicsTest.setUp(self)
        self.cube1d = _load_4d_testcube()[0, :, 0, 0]
        self.draw_method = qplt.plot
@tests.skip_data
@tests.skip_plot
class Test1dScatter(tests.GraphicsTest):
    """Exercise every cube/coordinate argument combination of iplt.scatter."""
    def setUp(self):
        super().setUp()
        self.cube = iris.load_cube(
            tests.get_data_path(("NAME", "NAMEIII_trajectory.txt")),
            "Temperature",
        )
        self.draw_method = iplt.scatter
    def test_coord_coord(self):
        # Two coordinates, coloured by the cube data.
        x = self.cube.coord("longitude")
        y = self.cube.coord("altitude")
        c = self.cube.data
        self.draw_method(x, y, c=c, edgecolor="none")
        self.check_graphic()
    def test_coord_coord_map(self):
        # Lat-lon coordinates draw on a map with coastlines.
        x = self.cube.coord("longitude")
        y = self.cube.coord("latitude")
        c = self.cube.data
        self.draw_method(x, y, c=c, edgecolor="none")
        plt.gca().coastlines("110m")
        self.check_graphic()
    def test_coord_cube(self):
        x = self.cube.coord("latitude")
        y = self.cube
        c = self.cube.coord("Travel Time").points
        self.draw_method(x, y, c=c, edgecolor="none")
        self.check_graphic()
    def test_cube_coord(self):
        x = self.cube
        y = self.cube.coord("altitude")
        c = self.cube.coord("Travel Time").points
        self.draw_method(x, y, c=c, edgecolor="none")
        self.check_graphic()
    def test_cube_cube(self):
        x = iris.load_cube(
            tests.get_data_path(("NAME", "NAMEIII_trajectory.txt")),
            "Rel Humidity",
        )
        y = self.cube
        c = self.cube.coord("Travel Time").points
        self.draw_method(x, y, c=c, edgecolor="none")
        self.check_graphic()
    def test_incompatible_objects(self):
        # cubes/coordinates of different sizes cannot be plotted
        x = self.cube
        y = self.cube.coord("altitude")[:-1]
        with self.assertRaises(ValueError):
            self.draw_method(x, y)
    def test_multidimensional(self):
        # multidimensional cubes/coordinates are not allowed
        x = _load_4d_testcube()[0, :, :, 0]
        y = x.coord("model_level_number")
        with self.assertRaises(ValueError):
            self.draw_method(x, y)
    def test_not_cube_or_coord(self):
        # inputs must be cubes or coordinates
        x = np.arange(self.cube.shape[0])
        y = self.cube
        with self.assertRaises(TypeError):
            self.draw_method(x, y)
@tests.skip_data
@tests.skip_plot
class Test1dQuickplotScatter(Test1dScatter):
    """Repeat the 1d scatter tests using iris.quickplot.scatter."""

    def setUp(self):
        # Deliberately skip Test1dScatter.setUp (which would load the cube
        # and then have its draw_method overwritten) and initialise the
        # graphics machinery directly.
        tests.GraphicsTest.setUp(self)
        self.cube = iris.load_cube(
            tests.get_data_path(("NAME", "NAMEIII_trajectory.txt")),
            "Temperature",
        )
        self.draw_method = qplt.scatter
@tests.skip_data
@tests.skip_plot
class TestAttributePositive(tests.GraphicsTest):
    """Check that a coordinate's 'positive' attribute orients the axis."""

    def test_1d_positive_up(self):
        cube = iris.load_cube(
            tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
        )
        qplt.plot(cube.coord("depth"), cube[0, :, 60, 80])
        self.check_graphic()

    def test_1d_positive_down(self):
        cube = iris.load_cube(
            tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
        )
        # Depth on the y-axis this time.
        qplt.plot(cube[0, :, 60, 80], cube.coord("depth"))
        self.check_graphic()

    def test_2d_positive_up(self):
        path = tests.get_data_path(
            ("NetCDF", "testing", "small_theta_colpex.nc")
        )
        cube = iris.load_cube(path, "air_potential_temperature")[0, :, 42, :]
        qplt.pcolormesh(cube)
        self.check_graphic()

    def test_2d_positive_down(self):
        path = tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
        cube = iris.load_cube(path)[0, :, 42, :]
        qplt.pcolormesh(cube)
        self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
    """Memoising decorator for nullary loader functions.

    The first call's result is stored, keyed by the wrapped function's
    name, and returned for every subsequent call.  NOTE: the arguments of
    later calls are ignored, so this is only suitable for loaders that
    take no (meaningful) arguments.  The mutable default ``cache`` is
    deliberate: it is the shared store for all decorated functions.
    """
    @wraps(fn)  # preserve the loader's __name__/__doc__ for debugging
    def inner(*args, **kwargs):
        key = fn.__name__
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]
    return inner
@cache
@tests.skip_data
def _load_4d_testcube():
    """Return the stock realistic 4d (TZYX) cube, with a multi-valued,
    bounded forecast_period coordinate and heavily reduced dimensions."""
    test_cube = iris.tests.stock.realistic_4d()
    # Swap the scalar forecast_period for a multi-valued, bounded coord.
    time_coord = test_cube.coord("time")
    n_times = len(time_coord.points)
    forecast_dims = test_cube.coord_dims(time_coord)
    test_cube.remove_coord("forecast_period")
    # Invent point and bound values roughly matching older testdata.
    point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
    point_uppers = point_values + (point_values[1] - point_values[0])
    bound_values = np.column_stack([point_values, point_uppers])
    # NOTE: this must be a DimCoord
    # - an equivalent AuxCoord produces different plots.
    new_forecast_coord = iris.coords.DimCoord(
        points=point_values,
        bounds=bound_values,
        standard_name="forecast_period",
        units=cf_units.Unit("hours"),
    )
    test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
    # Subsample every dimension heavily for faster testing.
    # NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
    return test_cube[:, ::10, ::10, ::10]
@cache
def _load_wind_no_bounds():
    """Load the COLPEX x_wind cube (TZYX) with all coord bounds stripped."""
    path = tests.get_data_path(("PP", "COLPEX", "small_eastward_wind.pp"))
    wind = iris.load_cube(path, "x_wind")
    # Remove bounds from every coordinate that carries them.
    for coord_name in ("grid_latitude", "grid_longitude",
                       "level_height", "sigma"):
        wind.coord(coord_name).bounds = None
    # Trim the horizontal extent for speed.
    return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord("time")
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord("forecast_period")
return cube
@tests.skip_plot
class SliceMixin:
    """Mixin class providing tests for each 2-dimensional permutation of axes.
    Requires self.draw_method to be the relevant plotting function,
    and self.results to be a dictionary containing the desired test results."""

    def _draw_slice(self, cube):
        # Shared body: render the slice, then compare against the
        # stored reference graphic.
        self.draw_method(cube)
        self.check_graphic()

    def test_yx(self):
        self._draw_slice(self.wind[0, 0, :, :])

    def test_zx(self):
        self._draw_slice(self.wind[0, :, 0, :])

    def test_tx(self):
        self._draw_slice(_time_series(self.wind[:, 0, 0, :]))

    def test_zy(self):
        self._draw_slice(self.wind[0, :, :, 0])

    def test_ty(self):
        self._draw_slice(_time_series(self.wind[:, 0, :, 0]))

    def test_tz(self):
        self._draw_slice(_time_series(self.wind[:, :, 0, 0]))
@tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
    """Exercise iris.plot.contour over every 2d slice permutation."""

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.contour
        self.wind = _load_4d_testcube()
@tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
    """Exercise iris.plot.contourf over every 2d slice permutation."""

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.contourf
        self.wind = _load_4d_testcube()
@tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
    """Exercise iris.plot.pcolor over every 2d slice permutation."""

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.pcolor
        self.wind = _load_4d_testcube()
@tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
    """Exercise iris.plot.pcolormesh over every 2d slice permutation."""

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.pcolormesh
        self.wind = _load_4d_testcube()
def check_warnings(method):
    """
    Decorator that adds a catch_warnings and filter to assert
    the method being decorated issues a UserWarning.
    """
    @wraps(method)
    def decorated_method(self, *args, **kwargs):
        # Force reset of iris.coords warnings registry to avoid suppression of
        # repeated warnings. warnings.resetwarnings() does not do this.
        if hasattr(coords, "__warningregistry__"):
            coords.__warningregistry__.clear()
        # Check that method raises warning.
        with warnings.catch_warnings():
            # "error" promotes every warning into an exception, so the
            # expected UserWarning becomes catchable by assertRaises below.
            warnings.simplefilter("error")
            with self.assertRaises(UserWarning):
                # If no warning is issued, assertRaises fails on exit and
                # the return value is discarded -- the decorated method
                # therefore effectively returns None either way.
                return method(self, *args, **kwargs)
    return decorated_method
def ignore_warnings(method):
    """
    Decorator that runs the wrapped method inside a catch_warnings block
    with every warning suppressed, restoring the filters afterwards.
    """
    @wraps(method)
    def decorated_method(self, *args, **kwargs):
        with warnings.catch_warnings():
            # Silence all warnings for the duration of this call only.
            warnings.simplefilter("ignore")
            return method(self, *args, **kwargs)
    return decorated_method
class CheckForWarningsMetaclass(type):
    """
    Metaclass that adds a further test for each base class test
    that checks that each test raises a UserWarning. Each base
    class test is then overriden to ignore warnings in order to
    check the underlying functionality.
    """
    def __new__(cls, name, bases, local):
        # Copy each base-class "test*" method into the new class dict
        # under "<name>_<decorator>", wrapped with the given decorator.
        def add_decorated_methods(attr_dict, target_dict, decorator):
            for key, value in attr_dict.items():
                if isinstance(value, types.FunctionType) and key.startswith(
                    "test"
                ):
                    new_key = "_".join((key, decorator.__name__))
                    if new_key not in target_dict:
                        wrapped = decorator(value)
                        wrapped.__name__ = new_key
                        target_dict[new_key] = wrapped
                    else:
                        # Refuse to silently shadow an existing attribute.
                        raise RuntimeError(
                            "A attribute called {!r} "
                            "already exists.".format(new_key)
                        )

        # Replace each base-class "test*" method in-place with a
        # decorated version (same name, warnings suppressed).
        def override_with_decorated_methods(attr_dict, target_dict, decorator):
            for key, value in attr_dict.items():
                if isinstance(value, types.FunctionType) and key.startswith(
                    "test"
                ):
                    target_dict[key] = decorator(value)

        # Add decorated versions of base methods
        # to check for warnings.
        for base in bases:
            add_decorated_methods(base.__dict__, local, check_warnings)
        # Override base methods to ignore warnings.
        for base in bases:
            override_with_decorated_methods(
                base.__dict__, local, ignore_warnings
            )
        return type.__new__(cls, name, bases, local)
@tests.skip_data
@tests.iristest_timing_decorator
class TestPcolorNoBounds(
    tests.GraphicsTest_nometa, SliceMixin, metaclass=CheckForWarningsMetaclass
):
    """
    Exercise iris.plot.pcolor for a cube whose coordinates carry
    no bounds information.
    """

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.pcolor
        self.wind = _load_wind_no_bounds()
@tests.skip_data
@tests.iristest_timing_decorator
class TestPcolormeshNoBounds(
    tests.GraphicsTest_nometa, SliceMixin, metaclass=CheckForWarningsMetaclass
):
    """
    Exercise iris.plot.pcolormesh for a cube whose coordinates carry
    no bounds information.
    """

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.pcolormesh
        self.wind = _load_wind_no_bounds()
@tests.skip_plot
class Slice1dMixin:
    """Mixin class providing tests for each 1-dimensional permutation of axes.
    Requires self.draw_method to be the relevant plotting function,
    and self.results to be a dictionary containing the desired test results."""

    def _plot_and_check(self, cube):
        # Shared body: render the 1d slice and compare with the reference.
        self.draw_method(cube)
        self.check_graphic()

    def test_x(self):
        self._plot_and_check(self.wind[0, 0, 0, :])

    def test_y(self):
        self._plot_and_check(self.wind[0, 0, :, 0])

    def test_z(self):
        self._plot_and_check(self.wind[0, :, 0, 0])

    def test_t(self):
        self._plot_and_check(_time_series(self.wind[:, 0, 0, 0]))

    def test_t_dates(self):
        # Plot against dates: drop forecast_period so time is the only
        # coordinate on the dimension, then format the date axis.
        cube = _date_series(self.wind[:, 0, 0, 0])
        self.draw_method(cube)
        plt.gcf().autofmt_xdate()
        plt.xlabel("Phenomenon time")
        self.check_graphic()
@tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
    """Exercise iris.plot.plot over every 1d slice permutation."""

    def setUp(self):
        super().setUp()
        self.draw_method = iplt.plot
        self.wind = _load_4d_testcube()
@tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
    """Exercise iris.quickplot.plot over every 1d slice permutation."""

    def setUp(self):
        super().setUp()
        self.draw_method = qplt.plot
        self.wind = _load_4d_testcube()
_load_cube_once_cache = {}


def load_cube_once(filename, constraint):
    """Same syntax as load_cube, but load each (filename, constraint)
    pair only once, caching the resulting cube in a module-level dict.
    """
    key = (filename, str(constraint))
    if key not in _load_cube_once_cache:
        _load_cube_once_cache[key] = iris.load_cube(filename, constraint)
    return _load_cube_once_cache[key]
class LambdaStr:
    """A callable wrapper whose repr() is a fixed, human-readable string."""

    def __init__(self, repr, lambda_fn):
        # NOTE: ``repr`` deliberately shadows the builtin here; it is just
        # the display string reported by __repr__.
        self.repr = repr
        self.lambda_fn = lambda_fn

    def __call__(self, *args, **kwargs):
        # Delegate straight to the wrapped callable.
        return self.lambda_fn(*args, **kwargs)

    def __repr__(self):
        return self.repr
@tests.skip_data
@tests.skip_plot
class TestPlotCoordinatesGiven(tests.GraphicsTest):
    """Table-driven checks of the coords= keyword for 2d and 1d plots.

    ``self.results`` maps a slice kind ('yx', 'zx', 'tx', 'x', 'y') to a
    sequence of (draw callable, coordinate-name pair) cases; each test
    method slices the cube accordingly and runs every case.
    """

    def setUp(self):
        super().setUp()
        filename = tests.get_data_path(
            ("PP", "COLPEX", "theta_and_orog_subset.pp")
        )
        # load_cube_once caches, so repeated test setUp stays cheap.
        self.cube = load_cube_once(filename, "air_potential_temperature")
        self.draw_module = iris.plot
        # LambdaStr wrappers give each drawing function a readable repr
        # for the failure messages produced below.
        self.contourf = LambdaStr(
            "iris.plot.contourf",
            lambda cube, *args, **kwargs: iris.plot.contourf(
                cube, *args, **kwargs
            ),
        )
        self.contour = LambdaStr(
            "iris.plot.contour",
            lambda cube, *args, **kwargs: iris.plot.contour(
                cube, *args, **kwargs
            ),
        )
        self.points = LambdaStr(
            "iris.plot.points",
            lambda cube, *args, **kwargs: iris.plot.points(
                cube, c=cube.data, *args, **kwargs
            ),
        )
        self.plot = LambdaStr(
            "iris.plot.plot",
            lambda cube, *args, **kwargs: iris.plot.plot(
                cube, *args, **kwargs
            ),
        )
        # Each slice kind is tried with both coordinate orderings.
        self.results = {
            "yx": (
                [self.contourf, ["grid_latitude", "grid_longitude"]],
                [self.contourf, ["grid_longitude", "grid_latitude"]],
                [self.contour, ["grid_latitude", "grid_longitude"]],
                [self.contour, ["grid_longitude", "grid_latitude"]],
                [self.points, ["grid_latitude", "grid_longitude"]],
                [self.points, ["grid_longitude", "grid_latitude"]],
            ),
            "zx": (
                [self.contourf, ["model_level_number", "grid_longitude"]],
                [self.contourf, ["grid_longitude", "model_level_number"]],
                [self.contour, ["model_level_number", "grid_longitude"]],
                [self.contour, ["grid_longitude", "model_level_number"]],
                [self.points, ["model_level_number", "grid_longitude"]],
                [self.points, ["grid_longitude", "model_level_number"]],
            ),
            "tx": (
                [self.contourf, ["time", "grid_longitude"]],
                [self.contourf, ["grid_longitude", "time"]],
                [self.contour, ["time", "grid_longitude"]],
                [self.contour, ["grid_longitude", "time"]],
                [self.points, ["time", "grid_longitude"]],
                [self.points, ["grid_longitude", "time"]],
            ),
            "x": ([self.plot, ["grid_longitude"]],),
            "y": ([self.plot, ["grid_latitude"]],),
        }

    def draw(self, draw_method, *args, **kwargs):
        # Look up the drawing function by name on the plot module,
        # call it, then compare against the stored reference graphic.
        draw_fn = getattr(self.draw_module, draw_method)
        draw_fn(*args, **kwargs)
        self.check_graphic()

    def run_tests(self, cube, results):
        # 2d convention: the cube itself is passed with a coords= kwarg.
        for draw_method, rcoords in results:
            draw_method(cube, coords=rcoords)
            try:
                self.check_graphic()
            except AssertionError as err:
                # Re-raise with the offending method/coords identified.
                self.fail(
                    "Draw method %r failed with coords: %r. "
                    "Assertion message: %s" % (draw_method, rcoords, err)
                )

    def run_tests_1d(self, cube, results):
        # there is a different calling convention for 1d plots:
        # the coordinate object comes first, then the cube.
        for draw_method, rcoords in results:
            draw_method(cube.coord(rcoords[0]), cube)
            try:
                self.check_graphic()
            except AssertionError as err:
                msg = (
                    "Draw method {!r} failed with coords: {!r}. "
                    "Assertion message: {!s}"
                )
                self.fail(msg.format(draw_method, rcoords, err))

    def test_yx(self):
        test_cube = self.cube[0, 0, :, :]
        self.run_tests(test_cube, self.results["yx"])

    def test_zx(self):
        # Only the first 15 levels, to keep the plot readable/fast.
        test_cube = self.cube[0, :15, 0, :]
        self.run_tests(test_cube, self.results["zx"])

    def test_tx(self):
        test_cube = self.cube[:, 0, 0, :]
        self.run_tests(test_cube, self.results["tx"])

    def test_x(self):
        test_cube = self.cube[0, 0, 0, :]
        self.run_tests_1d(test_cube, self.results["x"])

    def test_y(self):
        test_cube = self.cube[0, 0, :, 0]
        self.run_tests_1d(test_cube, self.results["y"])

    def test_badcoords(self):
        # Duplicate, wrong-count, and unknown coordinate specs must raise.
        cube = self.cube[0, 0, :, :]
        draw_fn = getattr(self.draw_module, "contourf")
        self.assertRaises(
            ValueError,
            draw_fn,
            cube,
            coords=["grid_longitude", "grid_longitude"],
        )
        self.assertRaises(
            ValueError,
            draw_fn,
            cube,
            coords=["grid_longitude", "grid_longitude", "grid_latitude"],
        )
        self.assertRaises(
            iris.exceptions.CoordinateNotFoundError,
            draw_fn,
            cube,
            coords=["grid_longitude", "wibble"],
        )
        self.assertRaises(ValueError, draw_fn, cube, coords=[])
        # The same failures apply when coordinate objects are passed.
        self.assertRaises(
            ValueError,
            draw_fn,
            cube,
            coords=[
                cube.coord("grid_longitude"),
                cube.coord("grid_longitude"),
            ],
        )
        self.assertRaises(
            ValueError,
            draw_fn,
            cube,
            coords=[
                cube.coord("grid_longitude"),
                cube.coord("grid_longitude"),
                cube.coord("grid_longitude"),
            ],
        )

    def test_non_cube_coordinate(self):
        # A free-standing DimCoord (not attached to the cube) may be
        # supplied as one of the plot coordinates.
        cube = self.cube[0, :, :, 0]
        pts = -100 + np.arange(cube.shape[1]) * 13
        x = coords.DimCoord(
            pts,
            standard_name="model_level_number",
            attributes={"positive": "up"},
            units="1",
        )
        self.draw("contourf", cube, coords=["grid_latitude", x])
@tests.skip_data
@tests.skip_plot
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
    """Exercise the coords= kwarg with dim coords, aux coords and names."""

    def setUp(self):
        super().setUp()
        path = tests.get_data_path(
            ("NetCDF", "rotated", "xy", "rotPole_landAreaFraction.nc")
        )
        self.cube = iris.load_cube(path)

    def _contourf_map(self, **kwargs):
        # Shared body for the map-producing cases.
        iplt.contourf(self.cube, **kwargs)
        plt.gca().coastlines("110m")
        self.check_graphic()

    def test_default(self):
        self._contourf_map()

    def test_coords(self):
        # Pass in dimension coords.
        rlat = self.cube.coord("grid_latitude")
        rlon = self.cube.coord("grid_longitude")
        self._contourf_map(coords=[rlon, rlat])
        # Pass in auxiliary coords.
        lat = self.cube.coord("latitude")
        lon = self.cube.coord("longitude")
        self._contourf_map(coords=[lon, lat])

    def test_coord_names(self):
        # Names of dimension coords, then names of auxiliary coords.
        self._contourf_map(coords=["grid_longitude", "grid_latitude"])
        self._contourf_map(coords=["longitude", "latitude"])

    def test_yx_order(self):
        # y/x ordering yields a non-map plot, so no coastlines are drawn.
        iplt.contourf(self.cube, coords=["grid_latitude", "grid_longitude"])
        self.check_graphic()
        iplt.contourf(self.cube, coords=["latitude", "longitude"])
        self.check_graphic()
@tests.skip_plot
class TestSymbols(tests.GraphicsTest):
    """Check rendering of the cloud-cover symbol set."""

    def test_cloud_cover(self):
        # Draw all ten cloud-cover glyphs in a row along y == 0.
        glyphs = [iris.symbols.CLOUD_COVER[i] for i in range(10)]
        iplt.symbols(list(range(10)), [0] * 10, glyphs, 0.375)
        iplt.plt.axis("off")
        self.check_graphic()
@tests.skip_plot
class TestPlottingExceptions(tests.IrisTest):
    """Check that unplottable bounded-grid situations raise cleanly."""

    def setUp(self):
        self.bounded_cube = tests.stock.lat_lon_cube()
        for name in ("latitude", "longitude"):
            self.bounded_cube.coord(name).guess_bounds()

    def test_boundmode_multidim(self):
        # Test exception translation.
        # We can't get contiguous bounded grids from multi-d coords.
        cube = self.bounded_cube
        cube.remove_coord("latitude")
        multidim_lat = coords.AuxCoord(
            points=cube.data, standard_name="latitude", units="degrees"
        )
        cube.add_aux_coord(multidim_lat, [0, 1])
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=["longitude", "latitude"])

    def test_boundmode_4bounds(self):
        # Test exception translation.
        # We can only get contiguous bounded grids with 2 bounds per point.
        cube = self.bounded_cube
        lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
        lat.bounds = np.column_stack(
            [lat.points + offset for offset in range(4)]
        )
        cube.remove_coord("latitude")
        cube.add_aux_coord(lat, 0)
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=["longitude", "latitude"])

    def test_different_coord_systems(self):
        # Mismatched coordinate systems on x and y must be rejected.
        cube = self.bounded_cube
        cube.coord("latitude").coord_system = iris.coord_systems.GeogCS(
            7000000
        )
        cube.coord("longitude").coord_system = iris.coord_systems.GeogCS(
            7000001
        )
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=["longitude", "latitude"])
@tests.skip_data
@tests.skip_plot
class TestPlotOtherCoordSystems(tests.GraphicsTest):
    """Plotting on a non-lat/lon (transverse Mercator) coordinate system."""

    def test_plot_tmerc(self):
        path = tests.get_data_path(
            ("NetCDF", "transverse_mercator", "tmean_1910_1910.nc")
        )
        self.cube = iris.load_cube(path)
        iplt.pcolormesh(self.cube[0])
        plt.gca().coastlines("110m")
        self.check_graphic()
@tests.skip_plot
class TestPlotCitation(tests.GraphicsTest):
    """Check iplt.citation against the default, a figure, and an axes."""

    def setUp(self):
        super().setUp()
        self.figure = plt.figure()
        self.axes = self.figure.gca()
        self.text = (
            "Lorem ipsum dolor sit amet, consectetur adipiscing "
            "elit, sed do eiusmod tempor incididunt ut labore et "
            "dolore magna aliqua."
        )

    def test(self):
        # Default target: the current figure.
        iplt.citation(self.text)
        self.check_graphic()

    def test_figure(self):
        # Explicit figure target.
        iplt.citation(self.text, figure=self.figure)
        self.check_graphic()

    def test_axes(self):
        # Explicit axes target.
        iplt.citation(self.text, axes=self.axes)
        self.check_graphic()
if __name__ == "__main__":
    # Delegate to the iris test runner when executed directly.
    tests.main()
| lgpl-3.0 |
bdmckean/MachineLearning | fall_2017/hw3/CNN.py | 2 | 4626 |
import argparse
import pickle
import gzip
from collections import Counter, defaultdict
import keras
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import MaxPool2D
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.core import Reshape
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
class Numbers:
    """
    Class to store MNIST data
    """

    def __init__(self, location):
        # The gzipped pickle holds (train, valid, test); note that the
        # validation split is deliberately used as this object's test set.
        with gzip.open(location, 'rb') as f:
            train_set, valid_set, _ = pickle.load(f)
        self.train_x, self.train_y = train_set
        self.test_x, self.test_y = valid_set
class CNN:
    '''
    CNN classifier for 28x28 single-channel MNIST digits.
    '''

    def __init__(self, train_x, train_y, test_x, test_y, epochs=15, batch_size=128):
        '''
        initialize CNN classifier: reshape the flat inputs to
        (n, 28, 28, 1) images, one-hot encode the labels and build
        the keras model.
        '''
        self.batch_size = batch_size
        self.epochs = epochs
        print (len(train_x))
        print (len([elem for elem in train_x]))
        # Keep the raw labels as ndarrays for the shape report below;
        # they are replaced by one-hot encodings afterwards.
        self.train_y = np.array(train_y)
        self.test_y = np.array(test_y)
        # input image dimensions
        img_x, img_y = 28, 28
        # reshape (n, 784) -> (n, 28, 28, 1)
        self.train_x = train_x.reshape(train_x.shape[0], img_x, img_y, 1)
        self.test_x = test_x.reshape(test_x.shape[0], img_x, img_y, 1)
        print (self.train_x.shape, self.test_x.shape, self.train_y.shape, self.test_y.shape)
        # One-hot encode the integer class labels.
        num_classes = len(set(train_y))
        self.train_y = [[1 if col == label else 0 for col in range(num_classes)]
                        for label in train_y]
        self.test_y = [[1 if col == label else 0 for col in range(num_classes)]
                       for label in test_y]
        # Two 3x3 conv layers -> max-pool -> dropout -> dense head.
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
        model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.50))
        model.add(Dense(10, activation='softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.SGD(lr=0.01),
                      metrics=['accuracy'])
        self.model = model

    def train(self):
        '''
        fit the model on the training set, reporting validation
        accuracy on the held-out set after every epoch
        '''
        self.model.fit(self.train_x, self.train_y,
                       batch_size=self.batch_size,
                       epochs=self.epochs,
                       verbose=1,
                       validation_data=(self.test_x, self.test_y))

    def evaluate(self):
        '''
        evaluate the trained model on the held-out set
        :return: [loss, accuracy] as reported by keras
        '''
        return self.model.evaluate(self.test_x, self.test_y)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CNN classifier options')
    parser.add_argument('--limit', type=int, default=-1,
                        help='Restrict training to this many examples')
    args = parser.parse_args()
    data = Numbers("../data/mnist.pkl.gz")
    # Honour the --limit flag.  Previously the parsed value was
    # unconditionally overwritten with 50000, making the option useless,
    # and the -1 default would silently drop the final example via the
    # [:-1] slice.  A non-positive limit now means "use every example".
    limit = args.limit if args.limit > 0 else len(data.train_x)
    cnn = CNN(data.train_x[:limit], data.train_y[:limit],
              data.test_x, data.test_y, batch_size=32)
    cnn.train()
    acc = cnn.evaluate()
    print(acc)
| mit |
h2oai/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_PUBDEV_7269_multinomial_auc_large.py | 2 | 6184 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from tests.pyunit_utils import roc_auc_score
def multinomial_auc_prostate_gbm():
    """Check multinomial AUC/PR-AUC handling in H2O GBM.

    Verifies: (1) the default metrics equal the weighted-OVR entries of the
    multinomial AUC tables, (2) the OVR/OVO macro/weighted AUCs agree with
    sklearn's roc_auc_score, (3) the auc_type parameter, early stopping on
    AUC, model_performance auc_type overrides, and CV-summary columns.
    Requires a running H2O cluster and the prostate test dataset.
    """
    data = h2o.import_file(pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    response_col = "GLEASON"
    data[response_col] = data[response_col].asfactor()
    predictors = ["RACE", "AGE", "PSA", "DPROS", "CAPSULE", "VOL", "DCAPS"]
    distribution = "multinomial"
    # train model
    gbm = H2OGradientBoostingEstimator(ntrees=1, max_depth=2, nfolds=3, distribution=distribution, auc_type="WEIGHTED_OVR")
    gbm.train(x=predictors, y=response_col, training_frame=data)
    gbm.show()
    # get result on training data from h2o
    cm = gbm.confusion_matrix(data)
    h2o_auc_table = gbm.multinomial_auc_table(train=True)
    h2o_aucpr_table = gbm.multinomial_aucpr_table(train=True)
    print(cm)
    print(h2o_auc_table.as_data_frame())
    print(h2o_aucpr_table.as_data_frame())
    # Fixed row positions in the AUC table: 7 = OVR macro, 8 = OVR weighted,
    # 30 = OVO macro, 31 = OVO weighted (column 3 holds the metric value).
    h2o_ovr_macro_auc = h2o_auc_table[3][7]
    h2o_ovr_weighted_auc = h2o_auc_table[3][8]
    h2o_ovo_macro_auc = h2o_auc_table[3][30]
    h2o_ovo_weighted_auc = h2o_auc_table[3][31]
    h2o_ovr_weighted_aucpr = h2o_aucpr_table[3][8]
    h2o_default_auc = gbm.auc()
    h2o_default_aucpr = gbm.aucpr()
    print("default vs. table AUC "+str(h2o_ovr_weighted_auc)+" "+str(h2o_default_auc))
    print("default vs. table PR AUC "+str(h2o_ovr_weighted_aucpr)+" "+str(h2o_default_aucpr))
    # default should be ovr weighted
    assert h2o_ovr_weighted_auc == h2o_default_auc, "default vs. table AUC "+str(h2o_ovr_weighted_auc)+" != "+str(h2o_default_auc)
    assert h2o_ovr_weighted_aucpr == h2o_default_aucpr, "default vs. table PR AUC "+str(h2o_ovr_weighted_aucpr)+" != "+str(h2o_default_aucpr)
    # transform data for sklearn: per-class probabilities and true labels
    prediction = gbm.predict(data).as_data_frame().iloc[:,1:]
    actual = data[response_col].as_data_frame().iloc[:, 0].tolist()
    # get result on training data from sklearn
    sklearn_ovr_macro_auc = roc_auc_score(actual, prediction, multi_class="ovr", average='macro')
    sklearn_ovr_weighted_auc = roc_auc_score(actual, prediction, multi_class="ovr", average='weighted')
    sklearn_ovo_macro_auc = roc_auc_score(actual, prediction, multi_class="ovo", average='macro')
    sklearn_ovo_weighted_auc = roc_auc_score(actual, prediction, multi_class="ovo", average='weighted')
    print("sklearn vs. h2o ovr macro: "+str(sklearn_ovr_macro_auc)+" "+str(h2o_ovr_macro_auc))
    print("sklearn vs. h2o ovr weighted: "+str(sklearn_ovr_weighted_auc)+" "+str(h2o_ovr_weighted_auc))
    print("sklearn vs. h2o ovo macro: "+str(sklearn_ovo_macro_auc)+" "+str(h2o_ovo_macro_auc))
    print("sklearn vs. h2o ovo weighted: "+str(sklearn_ovo_weighted_auc)+" "+str(h2o_ovo_weighted_auc))
    # compare results h2o vs sklearn
    precision = 1e-7
    assert abs(h2o_ovr_macro_auc - sklearn_ovr_macro_auc) < precision, "sklearn vs. h2o ovr macro: "+str(sklearn_ovr_macro_auc)+" != "+str(h2o_ovr_macro_auc)
    assert abs(h2o_ovr_weighted_auc - sklearn_ovr_weighted_auc) < precision, "sklearn vs. h2o ovr weighted: "+str(sklearn_ovr_weighted_auc)+" != "+str(h2o_ovr_weighted_auc)
    assert abs(h2o_ovo_macro_auc - sklearn_ovo_macro_auc) < precision, "sklearn vs. h2o ovo macro: "+str(sklearn_ovo_macro_auc)+" != "+str(h2o_ovo_macro_auc)
    assert abs(h2o_ovo_weighted_auc - sklearn_ovo_weighted_auc) < precision, "sklearn vs. h2o ovo weighted: "+str(sklearn_ovo_weighted_auc)+" != "+str(h2o_ovo_weighted_auc)
    # set auc_type explicitly and check the default metrics follow it
    gbm = H2OGradientBoostingEstimator(ntrees=1, max_depth=2, nfolds=3, distribution=distribution, auc_type="MACRO_OVR")
    gbm.train(x=predictors, y=response_col, training_frame=data, validation_frame=data)
    h2o_auc_table = gbm.multinomial_auc_table(train=True)
    h2o_aucpr_table = gbm.multinomial_aucpr_table(train=True)
    h2o_ovr_macro_auc = h2o_auc_table[3][7]
    h2o_ovr_macro_aucpr = h2o_aucpr_table[3][7]
    h2o_default_auc = gbm.auc()
    h2o_default_aucpr = gbm.aucpr()
    assert abs(h2o_ovr_macro_auc - h2o_default_auc) < precision, "default auc vs. h2o ovr macro auc: "+str(sklearn_ovr_macro_auc)+" != "+str(h2o_default_auc)
    assert abs(h2o_ovr_macro_aucpr - h2o_default_aucpr) < precision, "default aucpr vs. h2o ovr macro aucpr: "+str(h2o_ovr_macro_aucpr)+" != "+str(h2o_default_aucpr)
    # test early stopping on AUC actually stops before ntrees are built
    ntrees = 100
    gbm2 = H2OGradientBoostingEstimator(ntrees=ntrees, max_depth=2, nfolds=3, distribution=distribution, score_each_iteration=True, auc_type="MACRO_OVR", stopping_metric="AUC", stopping_rounds=3)
    gbm2.train(x=predictors, y=response_col, training_frame=data, validation_frame=data)
    assert ntrees > gbm2.score_history().shape[0], "Test early stopping: Training should start early."
    # test performance with different auc type than the model was built with
    perf2 = gbm.model_performance(data, auc_type="WEIGHTED_OVO")
    perf2_auc = perf2.auc()
    assert abs(h2o_ovo_weighted_auc - perf2_auc) < precision, "h2o ovo weighted vs. h2o performance ovo weighted: "+str(h2o_ovo_weighted_auc)+" != "+str(perf2_auc)
    # test peformance with no data and auc_type set only on model_performance
    ntrees = 2
    gbm3 = H2OGradientBoostingEstimator(ntrees=ntrees, max_depth=2, nfolds=3, distribution=distribution)
    gbm3.train(x=predictors, y=response_col, training_frame=data, validation_frame=data)
    perf3 = gbm3.model_performance(train=True, auc_type="WEIGHTED_OVO")
    perf3_auc = perf3.auc()
    assert perf3_auc == "NaN", "AUC should be \"NaN\" because it is not set in model parameters and test_data is None"
    # test aucpr is not in cv summary (pr_auc is the expected column name)
    print(gbm._model_json["output"]["cv_scoring_history"][0]._col_header)
    assert not "aucpr" in gbm.cross_validation_metrics_summary()[0], "The aucpr should not be in cross-validation metrics summary."
    assert "pr_auc" in gbm.cross_validation_metrics_summary()[0], "The pr_auc should be in cross-validation metrics summary."
if __name__ == "__main__":
    # Executed as a script: run under the pyunit harness, which handles
    # H2O cluster setup and teardown.
    pyunit_utils.standalone_test(multinomial_auc_prostate_gbm)
else:
    # Imported by the suite runner: the cluster already exists, run directly.
    multinomial_auc_prostate_gbm()
| apache-2.0 |
ahnitz/pycbc | pycbc/results/scatter_histograms.py | 4 | 29832 | # Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate figures with scatter plots and histograms.
"""
import itertools
import sys
import numpy
import scipy.stats
import matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependenant. If this is a problem then remove this and control from
# the executables directly.
if 'matplotlib.backends' not in sys.modules: # nopep8
matplotlib.use('agg')
from matplotlib import (offsetbox, pyplot, gridspec)
from pycbc.results import str_utils
from pycbc.io import FieldArray
def create_axes_grid(parameters, labels=None, height_ratios=None,
                     width_ratios=None, no_diagonals=False):
    """Given a list of parameters, creates a figure with an axis for
    every possible combination of the parameters.

    Parameters
    ----------
    parameters : list
        Names of the variables to be plotted.
    labels : {None, dict}, optional
        A dictionary of parameters -> parameter labels.
    height_ratios : {None, list}, optional
        Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec`
        for details.
    width_ratios : {None, list}, optional
        Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec`
        for details.
    no_diagonals : {False, bool}, optional
        Do not produce axes for the same parameter on both axes.

    Returns
    -------
    fig : pyplot.figure
        The figure that was created.
    axis_dict : dict
        A dictionary mapping the parameter combinations to the axis and their
        location in the subplots grid; i.e., the key, values are:
        `{('param1', 'param2'): (pyplot.axes, row index, column index)}`
    """
    if labels is None:
        labels = {param: param for param in parameters}
    if any(param not in labels for param in parameters):
        raise ValueError("labels must be provided for all parameters")
    # Size the figure from the number of parameters; dropping the diagonal
    # removes one row/column.
    ndim = len(parameters)
    if no_diagonals:
        ndim -= 1
    figsize = (8, 7) if ndim < 3 else (ndim * 3 - 1, ndim * 3 - 2)
    fig = pyplot.figure(figsize=figsize)
    # Lay out an ndim x ndim grid of subplots.
    gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios,
                           height_ratios=height_ratios,
                           wspace=0.05, hspace=0.05)
    # The parameter pairs that get a (visible) axis.
    wanted = set(itertools.combinations(parameters, 2))
    if not no_diagonals:
        wanted.update((param, param) for param in parameters)
    axis_dict = {}
    # Walk every grid cell; cells without a wanted pair are blanked.
    for nrow, ncolumn in itertools.product(range(ndim), repeat=2):
        # Grid cells are numbered row-major, so cell (r, c) is r*ndim + c.
        ax = pyplot.subplot(gs[nrow * ndim + ncolumn])
        px = parameters[ncolumn]
        py = parameters[nrow + 1] if no_diagonals else parameters[nrow]
        if (px, py) not in wanted:
            ax.axis('off')
            continue
        axis_dict[px, py] = (ax, nrow, ncolumn)
        # x labels only on the bottom row.
        if nrow + 1 == ndim:
            ax.set_xlabel('{}'.format(labels[px]), fontsize=18)
        else:
            pyplot.setp(ax.get_xticklabels(), visible=False)
        # y labels only on the left column.
        if ncolumn == 0:
            ax.set_ylabel('{}'.format(labels[py]), fontsize=18)
        else:
            pyplot.setp(ax.get_yticklabels(), visible=False)
    return fig, axis_dict
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
    """Return a factor to scale fonts by for the given figure.

    The factor is the square root of the ratio between the figure's area
    (in square inches) and the area of a fiducial figure of dimensions
    (`fiducial_width`, `fiducial_height`).
    """
    width, height = fig.get_size_inches()
    area_ratio = (width * height) / (fiducial_width * fiducial_height)
    return area_ratio**0.5
def construct_kde(samples_array, use_kombine=False):
    """Constructs a KDE from the given samples.

    If `use_kombine` is True, kombine's clustered KDE is used (requires the
    kombine package); otherwise `scipy.stats.gaussian_kde` is used. Note the
    two take opposite data orientations: kombine expects samples by row,
    scipy expects samples by column.
    """
    if use_kombine:
        try:
            import kombine
        except ImportError:
            raise ImportError("kombine is not installed.")
        return kombine.clustered_kde.KDE(samples_array)
    return scipy.stats.gaussian_kde(samples_array.T)
def create_density_plot(xparam, yparam, samples, plot_density=True,
                        plot_contours=True, percentiles=None, cmap='viridis',
                        contour_color=None, xmin=None, xmax=None,
                        ymin=None, ymax=None, exclude_region=None,
                        fig=None, ax=None, use_kombine=False):
    """Computes and plots posterior density and confidence intervals using the
    given samples.
    Parameters
    ----------
    xparam : string
        The parameter to plot on the x-axis.
    yparam : string
        The parameter to plot on the y-axis.
    samples : dict, numpy structured array, or FieldArray
        The samples to plot.
    plot_density : {True, bool}
        Plot a color map of the density.
    plot_contours : {True, bool}
        Plot contours showing the n-th percentiles of the density.
    percentiles : {None, float or array}
        What percentile contours to draw. If None, will plot the 50th
        and 90th percentiles.
    cmap : {'viridis', string}
        The name of the colormap to use for the density plot.
    contour_color : {None, string}
        What color to make the contours. Default is white for density
        plots and black for other plots.
    xmin : {None, float}
        Minimum value to plot on x-axis.
    xmax : {None, float}
        Maximum value to plot on x-axis.
    ymin : {None, float}
        Minimum value to plot on y-axis.
    ymax : {None, float}
        Maximum value to plot on y-axis.
    exclude_region : {None, str}
        Exclude the specified region when plotting the density or contours.
        Must be a string in terms of `xparam` and `yparam` that is
        understandable by numpy's logical evaluation. For example, if
        `xparam = m_1` and `yparam = m_2`, and you want to exclude the region
        for which `m_2` is greater than `m_1`, then exclude region should be
        `'m_2 > m_1'`.
    fig : {None, pyplot.figure}
        Add the plot to the given figure. If None and ax is None, will create
        a new figure.
    ax : {None, pyplot.axes}
        Draw plot on the given axis. If None, will create a new axis from
        `fig`.
    use_kombine : {False, bool}
        Use kombine's KDE to calculate density. Otherwise, will use
        `scipy.stats.gaussian_kde.` Default is False.
    Returns
    -------
    fig : pyplot.figure
        The figure the plot was made on.
    ax : pyplot.axes
        The axes the plot was drawn on.
    """
    if percentiles is None:
        percentiles = numpy.array([50., 90.])
    # convert percentiles to the fraction lying *outside* the contour and
    # sort ascending so the contour levels increase monotonically
    percentiles = 100. - numpy.array(percentiles)
    percentiles.sort()
    if ax is None and fig is None:
        fig = pyplot.figure()
    if ax is None:
        ax = fig.add_subplot(111)
    # convert samples to array and construct kde
    xsamples = samples[xparam]
    ysamples = samples[yparam]
    arr = numpy.vstack((xsamples, ysamples)).T
    kde = construct_kde(arr, use_kombine=use_kombine)
    # construct grid to evaluate on; bounds default to the sample extrema
    if xmin is None:
        xmin = xsamples.min()
    if xmax is None:
        xmax = xsamples.max()
    if ymin is None:
        ymin = ysamples.min()
    if ymax is None:
        ymax = ysamples.max()
    npts = 100
    X, Y = numpy.mgrid[
        xmin:xmax:complex(0, npts),  # pylint:disable=invalid-slice-index
        ymin:ymax:complex(0, npts)]  # pylint:disable=invalid-slice-index
    pos = numpy.vstack([X.ravel(), Y.ravel()])
    # kombine returns log density and takes samples by row; scipy returns
    # density directly and takes samples by column
    if use_kombine:
        Z = numpy.exp(kde(pos.T).reshape(X.shape))
        draw = kde.draw
    else:
        Z = kde(pos).T.reshape(X.shape)
        draw = kde.resample
    if exclude_region is not None:
        # convert X,Y to a single FieldArray so we can use it's ability to
        # evaluate strings
        farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y})
        Z[farr[exclude_region]] = 0.
    if plot_density:
        ax.imshow(numpy.rot90(Z), extent=[xmin, xmax, ymin, ymax],
                  aspect='auto', cmap=cmap, zorder=1)
        if contour_color is None:
            contour_color = 'w'
    if plot_contours:
        # compute the percentile values by evaluating the kde on a fresh
        # draw of npts**2 samples and taking percentiles of those densities
        resamps = kde(draw(int(npts**2)))
        if use_kombine:
            resamps = numpy.exp(resamps)
        s = numpy.percentile(resamps, percentiles)
        if contour_color is None:
            contour_color = 'k'
        # make linewidths thicker if not plotting density for clarity
        if plot_density:
            lw = 1
        else:
            lw = 2
        ct = ax.contour(X, Y, Z, s, colors=contour_color, linewidths=lw,
                        zorder=3)
        # label contours with the original (inside) percentile values
        lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)]
        fmt = dict(zip(ct.levels, lbls))
        fs = 12
        ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs)
    return fig, ax
def create_marginalized_hist(ax, values, label, percentiles=None,
                             color='k', fillcolor='gray', linecolor='navy',
                             linestyle='-',
                             title=True, expected_value=None,
                             expected_color='red', rotated=False,
                             plot_min=None, plot_max=None):
    """Plots a 1D marginalized histogram of the given param from the given
    samples.
    Parameters
    ----------
    ax : pyplot.Axes
        The axes on which to draw the plot.
    values : array
        The parameter values to plot.
    label : str
        A label to use for the title.
    percentiles : {None, float or array}
        What percentiles to draw lines at. If None, will draw lines at
        `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the
        median).
    color : {'k', string}
        What color to make the histogram; default is black.
    fillcolor : {'gray', string, or None}
        What color to fill the histogram with. Set to None to not fill the
        histogram. Default is 'gray'.
    linestyle : str, optional
        What line style to use for the histogram. Default is '-'.
    linecolor : {'navy', string}
        What color to use for the percentile lines. Default is 'navy'.
    title : bool, optional
        Add a title with a estimated value +/- uncertainty. The estimated value
        is the pecentile halfway between the max/min of ``percentiles``, while
        the uncertainty is given by the max/min of the ``percentiles``. If no
        percentiles are specified, defaults to quoting the median +/- 95/5
        percentiles.
    expected_value : {None, float}
        If provided, draw a solid line at this value in ``expected_color``.
    expected_color : {'red', string}
        Color for the expected-value line. Default is 'red'.
    rotated : {False, bool}
        Plot the histogram on the y-axis instead of the x. Default is False.
    plot_min : {None, float}
        The minimum value to plot. If None, will default to whatever `pyplot`
        creates.
    plot_max : {None, float}
        The maximum value to plot. If None, will default to whatever `pyplot`
        creates.
    """
    # no fill color means an outline-only (step) histogram
    if fillcolor is None:
        htype = 'step'
    else:
        htype = 'stepfilled'
    if rotated:
        orientation = 'horizontal'
    else:
        orientation = 'vertical'
    ax.hist(values, bins=50, histtype=htype, orientation=orientation,
            facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
            density=True)
    if percentiles is None:
        percentiles = [5., 50., 95.]
    if len(percentiles) > 0:
        plotp = numpy.percentile(values, percentiles)
    else:
        plotp = []
    # dashed line at each requested percentile
    for val in plotp:
        if rotated:
            ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
        else:
            ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
    # plot expected
    if expected_value is not None:
        if rotated:
            ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
        else:
            ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
    if title:
        # quote the midpoint percentile +/- the min/max percentiles
        if len(percentiles) > 0:
            minp = min(percentiles)
            maxp = max(percentiles)
            medp = (maxp + minp) / 2.
        else:
            minp = 5
            medp = 50
            maxp = 95
        values_min = numpy.percentile(values, minp)
        values_med = numpy.percentile(values, medp)
        values_max = numpy.percentile(values, maxp)
        negerror = values_med - values_min
        poserror = values_max - values_med
        fmt = '${0}$'.format(str_utils.format_value(
            values_med, negerror, plus_error=poserror))
        if rotated:
            ax.yaxis.set_label_position("right")
            # sets colored title for marginal histogram
            set_marginal_histogram_title(ax, fmt, color,
                                         label=label, rotated=rotated)
        else:
            # sets colored title for marginal histogram
            set_marginal_histogram_title(ax, fmt, color, label=label)
    # remove ticks and set limits
    if rotated:
        # Remove x-ticks
        ax.set_xticks([])
        # turn off x-labels
        ax.set_xlabel('')
        # set limits
        ymin, ymax = ax.get_ylim()
        if plot_min is not None:
            ymin = plot_min
        if plot_max is not None:
            ymax = plot_max
        ax.set_ylim(ymin, ymax)
    else:
        # Remove y-ticks
        ax.set_yticks([])
        # turn off y-label
        ax.set_ylabel('')
        # set limits
        xmin, xmax = ax.get_xlim()
        if plot_min is not None:
            xmin = plot_min
        if plot_max is not None:
            xmax = plot_max
        ax.set_xlim(xmin, xmax)
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False):
    """ Sets the title of the marginal histograms.

    Titles accumulate: the first call creates "label = fmt"; later calls
    append " fmt" in the given color, so overlaid histograms each get a
    colored value in the same title box.

    Parameters
    ----------
    ax : Axes
        The `Axes` instance for the plot.
    fmt : str
        The string to add to the title.
    color : str
        The color of the text to add to the title.
    label : str
        If title does not exist, then include label at beginning of the string.
    rotated : bool
        If `True` then rotate the text 270 degrees for sideways title.
    """
    # rotated titles go sideways, stacked vertically, anchored to the right
    # of the axes; upright titles stack horizontally just above the axes
    if rotated:
        angle = 270
        anchor_x = 1.05
        anchor_y = 1.0
        packer_class = offsetbox.VPacker
    else:
        angle = 0
        anchor_x = 0.0
        anchor_y = 1.15 if len(ax.get_figure().axes) > 1 else 1.05
        packer_class = offsetbox.HPacker
    text_props = dict(color=color, size=15, rotation=angle,
                      ha='left', va='bottom')
    if hasattr(ax, "title_boxes"):
        # a title already exists: drop the old anchor, append the new value
        ax.title_anchor.remove()
        new_box = offsetbox.TextArea(" {}".format(fmt), textprops=text_props)
        ax.title_boxes = ax.title_boxes + [new_box]
    else:
        # first title on this axis: start the box list with "label = value"
        new_box = offsetbox.TextArea("{} = {}".format(label, fmt),
                                     textprops=text_props)
        ax.title_boxes = [new_box]
    # pack the text boxes and anchor them to the axes
    ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5)
    anchored_ybox = offsetbox.AnchoredOffsetbox(
        loc=2, child=ybox, pad=0.,
        frameon=False, bbox_to_anchor=(anchor_x, anchor_y),
        bbox_transform=ax.transAxes, borderpad=0.)
    # keep a reference so the next call can remove/replace it
    ax.title_anchor = ax.add_artist(anchored_ybox)
def create_multidim_plot(parameters, samples, labels=None,
                         mins=None, maxs=None, expected_parameters=None,
                         expected_parameters_color='r',
                         plot_marginal=True, plot_scatter=True,
                         marginal_percentiles=None, contour_percentiles=None,
                         marginal_title=True, marginal_linestyle='-',
                         zvals=None, show_colorbar=True, cbar_label=None,
                         vmin=None, vmax=None, scatter_cmap='plasma',
                         plot_density=False, plot_contours=True,
                         density_cmap='viridis',
                         contour_color=None, hist_color='black',
                         line_color=None, fill_color='gray',
                         use_kombine=False, fig=None, axis_dict=None):
    """Generate a figure with several plots and histograms.
    Parameters
    ----------
    parameters: list
        Names of the variables to be plotted.
    samples : FieldArray
        A field array of the samples to plot.
    labels: dict, optional
        A dictionary mapping parameters to labels. If none provided, will just
        use the parameter strings as the labels.
    mins : {None, dict}, optional
        Minimum value for the axis of each variable in `parameters`.
        If None, it will use the minimum of the corresponding variable in
        `samples`.
    maxs : {None, dict}, optional
        Maximum value for the axis of each variable in `parameters`.
        If None, it will use the maximum of the corresponding variable in
        `samples`.
    expected_parameters : {None, dict}, optional
        Expected values of `parameters`, as a dictionary mapping parameter
        names -> values. A cross will be plotted at the location of the
        expected parameters on axes that plot any of the expected parameters.
    expected_parameters_color : {'r', string}, optional
        What color to make the expected parameters cross.
    plot_marginal : {True, bool}
        Plot the marginalized distribution on the diagonals. If False, the
        diagonal axes will be turned off.
    plot_scatter : {True, bool}
        Plot each sample point as a scatter plot.
    marginal_percentiles : {None, array}
        What percentiles to draw lines at on the 1D histograms.
        If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
        upper 90th percentile and the median).
    marginal_title : bool, optional
        Add a title over the 1D marginal plots that gives an estimated value
        +/- uncertainty. The estimated value is the pecentile halfway between
        the max/min of ``marginal_percentiles``, while the uncertainty is
        given by the max/min of the ``marginal_percentiles``. If no
        ``marginal_percentiles`` are specified, the median +/- 95/5 percentiles
        will be quoted.
    marginal_linestyle : str, optional
        What line style to use for the marginal histograms.
    contour_percentiles : {None, array}
        What percentile contours to draw on the scatter plots. If None,
        will plot the 50th and 90th percentiles.
    zvals : {None, array}
        An array to use for coloring the scatter plots. If None, scatter points
        will be the same color.
    show_colorbar : {True, bool}
        Show the colorbar of zvalues used for the scatter points. A ValueError
        will be raised if zvals is None and this is True.
    cbar_label : {None, str}
        Specify a label to add to the colorbar.
    vmin: {None, float}, optional
        Minimum value for the colorbar. If None, will use the minimum of zvals.
    vmax: {None, float}, optional
        Maximum value for the colorbar. If None, will use the maxmimum of
        zvals.
    scatter_cmap : {'plasma', string}
        The color map to use for the scatter points. Default is 'plasma'.
    plot_density : {False, bool}
        Plot the density of points as a color map.
    plot_contours : {True, bool}
        Draw contours showing the 50th and 90th percentile confidence regions.
    density_cmap : {'viridis', string}
        The color map to use for the density plot.
    contour_color : {None, string}
        The color to use for the contour lines. Defaults to white for
        density plots, navy for scatter plots without zvals, and black
        otherwise.
    use_kombine : {False, bool}
        Use kombine's KDE to calculate density. Otherwise, will use
        `scipy.stats.gaussian_kde.` Default is False.
    fig : {None, pyplot.figure}, optional
        Draw on an existing figure; must be given together with `axis_dict`.
        If None, a new axis grid is created.
    axis_dict : {None, dict}, optional
        Axis mapping as returned by `create_axes_grid`; used with `fig`.
    Returns
    -------
    fig : pyplot.figure
        The figure that was created.
    axis_dict : dict
        A dictionary mapping the parameter combinations to the axis and their
        location in the subplots grid; i.e., the key, values are:
        `{('param1', 'param2'): (pyplot.axes, row index, column index)}`
    """
    if labels is None:
        labels = {p: p for p in parameters}
    # set up the figure with a grid of axes
    # if only plotting 2 parameters, make the marginal plots smaller
    nparams = len(parameters)
    if nparams == 2:
        width_ratios = [3, 1]
        height_ratios = [1, 3]
    else:
        width_ratios = height_ratios = None
    # only plot scatter if more than one parameter
    plot_scatter = plot_scatter and nparams > 1
    # Sort zvals to get higher values on top in scatter plots
    if plot_scatter:
        if zvals is not None:
            sort_indices = zvals.argsort()
            zvals = zvals[sort_indices]
            samples = samples[sort_indices]
            if contour_color is None:
                contour_color = 'k'
        elif show_colorbar:
            raise ValueError("must provide z values to create a colorbar")
        else:
            # just make all scatter points same color
            zvals = 'gray'
            if plot_contours and contour_color is None:
                contour_color = 'navy'
    # convert samples to a dictionary to avoid re-computing derived parameters
    # every time they are needed
    samples = dict([[p, samples[p]] for p in parameters])
    # values for axis bounds
    if mins is None:
        mins = {p: samples[p].min() for p in parameters}
    else:
        # copy the dict
        mins = {p: val for p, val in mins.items()}
    if maxs is None:
        maxs = {p: samples[p].max() for p in parameters}
    else:
        # copy the dict
        maxs = {p: val for p, val in maxs.items()}
    # create the axis grid
    if fig is None and axis_dict is None:
        fig, axis_dict = create_axes_grid(
            parameters, labels=labels,
            width_ratios=width_ratios, height_ratios=height_ratios,
            no_diagonals=not plot_marginal)
    # Diagonals...
    if plot_marginal:
        for pi, param in enumerate(parameters):
            ax, _, _ = axis_dict[param, param]
            # if only plotting 2 parameters and on the second parameter,
            # rotate the marginal plot
            rotated = nparams == 2 and pi == nparams-1
            # see if there are expected values
            if expected_parameters is not None:
                try:
                    expected_value = expected_parameters[param]
                except KeyError:
                    expected_value = None
            else:
                expected_value = None
            create_marginalized_hist(
                ax, samples[param], label=labels[param],
                color=hist_color, fillcolor=fill_color,
                linestyle=marginal_linestyle, linecolor=line_color,
                title=marginal_title, expected_value=expected_value,
                expected_color=expected_parameters_color,
                rotated=rotated, plot_min=mins[param], plot_max=maxs[param],
                percentiles=marginal_percentiles)
    # Off-diagonals...
    for px, py in axis_dict:
        if px == py:
            continue
        ax, _, _ = axis_dict[px, py]
        if plot_scatter:
            # fade the points when a density map is drawn underneath
            if plot_density:
                alpha = 0.3
            else:
                alpha = 1.
            plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5,
                             edgecolors='none', vmin=vmin, vmax=vmax,
                             cmap=scatter_cmap, alpha=alpha, zorder=2)
        if plot_contours or plot_density:
            # Exclude out-of-bound regions
            # this is a bit kludgy; should probably figure out a better
            # solution to eventually allow for more than just m_p m_s
            if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'):
                exclude_region = 'm_s > m_p'
            else:
                exclude_region = None
            create_density_plot(
                px, py, samples, plot_density=plot_density,
                plot_contours=plot_contours, cmap=density_cmap,
                percentiles=contour_percentiles,
                contour_color=contour_color, xmin=mins[px], xmax=maxs[px],
                ymin=mins[py], ymax=maxs[py],
                exclude_region=exclude_region, ax=ax,
                use_kombine=use_kombine)
        # mark expected values with cross-hair lines where known
        if expected_parameters is not None:
            try:
                ax.axvline(expected_parameters[px], lw=1.5,
                           color=expected_parameters_color, zorder=5)
            except KeyError:
                pass
            try:
                ax.axhline(expected_parameters[py], lw=1.5,
                           color=expected_parameters_color, zorder=5)
            except KeyError:
                pass
        ax.set_xlim(mins[px], maxs[px])
        ax.set_ylim(mins[py], maxs[py])
    # adjust tick number for large number of plots
    if len(parameters) > 3:
        for px, py in axis_dict:
            ax, _, _ = axis_dict[px, py]
            ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
            ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3))
    if plot_scatter and show_colorbar:
        # compute font size based on fig size
        scale_fac = get_scale_fac(fig)
        fig.subplots_adjust(right=0.85, wspace=0.03)
        cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
        cb = fig.colorbar(plt, cax=cbar_ax)
        if cbar_label is not None:
            cb.set_label(cbar_label, fontsize=12*scale_fac)
        cb.ax.tick_params(labelsize=8*scale_fac)
    return fig, axis_dict
def remove_common_offset(arr):
    """Given an array of data, removes a common offset > 1000, returning the
    removed value.
    """
    shift = 0
    all_negative = (arr <= 0).all()
    all_positive = (arr >= 0).all()
    # only shift when every value shares the same sign
    if all_negative or all_positive:
        log_lo = numpy.log10(abs(arr).min())
        log_hi = numpy.log10(abs(arr).max())
        # only remove an offset when the smallest and largest magnitudes
        # share an order of magnitude that is > O(1000)
        if numpy.floor(log_lo) == numpy.floor(log_hi) and log_lo > 3:
            shift = numpy.floor(10**log_lo)
            if all_negative:
                shift *= -1
        arr = arr - shift
    return arr, int(shift)
def reduce_ticks(ax, which, maxticks=3):
    """Resample the `which`-axis ticks of a pyplot axis so that at most
    `maxticks` remain.

    Parameters
    ----------
    ax : axis
        The axis to adjust.
    which : {'x' | 'y'}
        Which axis to adjust.
    maxticks : {3, int}
        Maximum number of ticks to use.

    Returns
    -------
    array
        An array of the selected ticks.
    """
    ticks = getattr(ax, 'get_{}ticks'.format(which))()
    if len(ticks) <= maxticks:
        return ticks
    # skip ticks sitting within 10% of either edge of the axis limits
    lo, hi = getattr(ax, 'get_{}lim'.format(which))()
    margin = abs(hi - lo) / 10.
    first = 1 if ticks[0] < lo + margin else 0
    last = len(ticks) - 1 if ticks[-1] > hi - margin else len(ticks)
    # thin the remaining ticks down to roughly maxticks
    step = int(len(ticks) / maxticks)
    return ticks[first:last:step]
| gpl-3.0 |
boland1992/SeisSuite | build/lib/ambient/ant/psspectrum.py | 8 | 7434 | """
Module related to Fourier spectrum of trace
"""
import psutils
import numpy as np
from numpy.fft import rfft
import matplotlib.pyplot as plt
import itertools as it
class FreqAmplSpectrum:
    """
    Frequence-amplitude spectrum

    Holds a frequency grid (`freq`) and the stacked complex Fourier
    coefficients (`coef`) of one or more traces sharing the same length
    and sampling rate.
    """

    def __init__(self, trace):
        """
        Initializes the frequency grid and coefficient accumulator from
        *trace*, then stacks the trace's spectrum.

        @type trace: L{obspy.core.trace.Trace}
        """
        # initializing frequency and amplitude arrays
        npts = trace.stats.npts
        # length of the one-sided (real-input) FFT; use floor division so
        # nfreq stays an int under Python 3 (true division would yield a
        # float, which np.zeros/np.arange reject)
        nfreq = npts // 2 + 1 if npts % 2 == 0 else (npts + 1) // 2
        self.freq = np.arange(nfreq) * trace.stats.sampling_rate / npts
        self.coef = np.zeros(nfreq, dtype=np.complex64)
        # adding spectrum of trace
        self.add(trace)

    def __str__(self):
        """
        e.g., Fourier spectrum in frequency interval 0-150 Hz
        @rtype: str
        """
        s = "Fourier spectrum in frequency interval {minf:.1f}-{maxf:.1f} Hz"
        return s.format(minf=min(self.freq), maxf=max(self.freq))

    def __repr__(self):
        return "<{0}>".format(self.__str__())

    def add(self, trace):
        """
        Adds (spectrum of) trace to spectrum

        @type trace: L{obspy.core.trace.Trace}
        """
        # trace must have the same npts/sampling rate as the one used at
        # construction for the stacking to be meaningful
        self.coef += rfft(trace.data)
class SpectrumInfos:
    """
    Infos on freq-ampl spectrum:
    station, filters, saved trace, freq-ampl spectrum
    """

    def __init__(self, station, filters, trace, savetrace=False):
        """
        @type station: L{Station}
        @type filters: str
        @type trace: L{obspy.core.trace.Trace}
        """
        self.station = station
        self.filters = filters
        # keep a reference to the trace only when explicitly requested
        if savetrace:
            self.savedtrace = trace
        else:
            self.savedtrace = None
        self.spectrum = FreqAmplSpectrum(trace=trace)

    def __repr__(self):
        """
        e.g., <Fourier spectrum in frequency interval 0-150 Hz, station BL.10.NUPB,
        filters 'RAW'>
        @rtype: str
        """
        template = "<{strspect}, station {net}.{ch}.{name}, filters '{filters}'>"
        return template.format(strspect=self.spectrum,
                               net=self.station.network,
                               ch=self.station.channel,
                               name=self.station.name,
                               filters=self.filters)

    def add(self, trace, savetrace=False):
        """
        Adds (spectrum of) trace to spectrum.
        @type trace: L{obspy.core.trace.Trace}
        """
        if savetrace:
            self.savedtrace = trace
        self.spectrum.add(trace)
class SpectrumList(list):
    """
    List of amplitude spectra: one spectrum per station
    and per filtering sequence
    """

    def __init__(self):
        """
        @type self: list of L{SpectrumInfos}
        """
        list.__init__(self)

    def __repr__(self):
        """
        e.g., <list of 4 spectra on 2 stations>
        @rtype: str
        """
        nstat = len(set(spect.station.name for spect in self))
        s = "<list of {nspect} spectra on {nstat} stations>"
        return s.format(nspect=len(self), nstat=nstat)

    def add(self, trace, station, filters, starttime=None,
            endtime=None, savetrace=False):
        """
        Adds (spectrum of) trace to spectrum list: if station/filters already exist
        in list, (spectrum of) trace is stacked. Else, a new spectrum is appended.
        @type trace: L{obspy.core.trace.Trace}
        @type station: L{Station}
        @type filters: str
        @type starttime: L{UTCDateTime}
        @type endtime: L{UTCDateTime}
        @type savetrace: bool
        """
        # trimming trace if needed (and always working with copy!)
        trcopy = trace.copy()
        if starttime:
            trcopy.trim(starttime=starttime)
        if endtime:
            trcopy.trim(endtime=endtime)
        try:
            # looking for spectrum of station/filtering steps in list
            spectrum = next(spect for spect in self
                            if spect.station == station and spect.filters == filters)
        except StopIteration:
            # no spectrum for station/filters
            # -> appending a new SpectrumInfos instance
            spectrum_infos = SpectrumInfos(station=station, filters=filters,
                                           trace=trcopy, savetrace=savetrace)
            self.append(spectrum_infos)
        else:
            # spectrum of station/filters already exists
            # -> adding spectrum of current trace
            spectrum.add(trace=trcopy, savetrace=savetrace)

    def plot(self, smooth_window_freq=0.0):
        """
        Plots list of spectra: rows = filtering steps, columns = stations
        Plotting freq x abs(Fourier coefs)

        @type smooth_window_freq: width (in Hz) of the moving-average window
        applied to each amplitude spectrum (0 = no smoothing)
        """
        # list of stations and filters in spectra (preserving order)
        filterslist = []
        stationlist = []
        for spect in self:
            assert isinstance(spect, SpectrumInfos)
            if spect.filters not in filterslist:
                filterslist.append(spect.filters)
            if spect.station not in stationlist:
                stationlist.append(spect.station)
        # rows = filtering steps, columns = stations: 1 station = 1 spectrum [+ 1 trace]
        nrow = len(filterslist)
        ncol = len(stationlist)
        # plot per pair (station, filters)
        plottraces = any(spect.savedtrace for spect in self)
        plotperpair = 2 if plottraces else 1
        plt.figure()
        for ipair, (filters, station) in enumerate(it.product(filterslist, stationlist)):
            assert isinstance(filters, str)
            try:
                # looking for spectrum of station/filters
                spect = next(spect for spect in self
                             if spect.station == station and spect.filters == filters)
            except StopIteration:
                continue
            # getting freq and amplitude arrays
            spectrum = spect.spectrum
            assert isinstance(spectrum, FreqAmplSpectrum)
            freq = spectrum.freq
            ampl = np.abs(spectrum.coef)
            # smoothing amplitude spectrum (except after spectral whitening)
            if 'white' not in filters.lower():
                halfwindow = int(round(smooth_window_freq / (freq[1] - freq[0]) / 2.0))
                ampl = psutils.moving_avg(ampl, halfwindow)
            # position of current station/filters in plot; floor division so
            # that the row index stays an int under Python 3 (plain `/` gave
            # a float here, breaking the subplot position arithmetic)
            irow = ipair // ncol + 1
            icol = ipair % ncol + 1
            pos = (irow - 1) * ncol * plotperpair + (icol - 1) * plotperpair + 1
            # plotting frequence-amplitude
            plt.subplot(nrow, ncol * plotperpair, pos)
            plt.plot(freq[100:], ampl[100:])
            plt.xlim((0.0, 0.5))
            if icol == 1:
                plt.ylabel(filters)
            if irow == 1:
                plt.title('{station} (ampl spectrum)'.format(station=spect.station.name))
            if irow == nrow:
                plt.xlabel('Frequency (Hz)')
            # plotting trace
            if plottraces and spect.savedtrace:
                tr = spect.savedtrace
                t = np.arange(0, tr.stats.npts / tr.stats.sampling_rate, tr.stats.delta)
                t /= 3600.0
                plt.subplot(nrow, ncol * plotperpair, pos + 1)
                plt.plot(t, tr.data)
                if irow == 1:
                    plt.title('{station} (last day)'.format(station=spect.station.name))
                if irow == nrow:
                    plt.xlabel('Time (h)')
        plt.show()
mblue9/tools-iuc | tools/vsnp/vsnp_build_tables.py | 2 | 17888 | #!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import pandas
import pandas.io.formats.excel
from Bio import SeqIO
INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'
INPUT_JSON_DIR = 'input_json_dir'
INPUT_NEWICK_DIR = 'input_newick_dir'
# Maximum columns allowed in a LibreOffice
# spreadsheet is 1024. Excel allows for
# 16,384 columns, but we'll set the lower
# number as the maximum. Some browsers
# (e.g., Firefox on Linux) are configured
# to use LibreOffice for Excel spreadsheets.
MAXCOLS = 1024
OUTPUT_EXCEL_DIR = 'output_excel_dir'
def annotate_table(table_df, group, annotation_dict):
    """Append an 'annotations' row to table_df describing the genomic
    feature at each position.

    table_df columns are "reference:position" strings; annotation_dict maps
    a genbank chromosome id to a DataFrame of features indexed by an
    IntervalIndex of feature spans (see get_annotation_dict).  A temporary
    per-group CSV is written then removed while building the row.
    """
    for gbk_chrome, pro in list(annotation_dict.items()):
        # split "reference:position" column labels into two columns
        ref_pos = list(table_df)
        ref_series = pandas.Series(ref_pos)
        ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
        # keep only the positions belonging to this chromosome
        all_ref = ref_df[ref_df['reference'] == gbk_chrome]
        positions = all_ref.position.to_frame()
        # Create an annotation file.
        annotation_file = "%s_annotations.csv" % group
        with open(annotation_file, "a") as fh:
            for _, row in positions.iterrows():
                pos = row.position
                try:
                    # IntervalIndex lookup: feature whose span contains pos
                    aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
                    try:
                        # NOTE(review): unpack names don't match the column
                        # order (name<-locus, locus<-product, tag<-gene), so
                        # the printed order is product, gene, locus -- verify
                        # against downstream consumers before renaming
                        chrom, name, locus, tag = aaa.values[0]
                        print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
                    except ValueError:
                        # If only one annotation for the entire
                        # chromosome (e.g., flu) then having [0] fails
                        chrom, name, locus, tag = aaa.values
                        print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
                except KeyError:
                    # position falls outside every annotated feature span
                    print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
        # Read the annotation file into a data frame.
        annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
        # Remove the annotation_file from disk since both
        # cascade and sort tables are built using the file,
        # and it is opened for writing in append mode.
        os.remove(annotation_file)
        # Process the data.
        table_df_transposed = table_df.T
        table_df_transposed.index = table_df_transposed.index.rename('index')
        table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
        table_df = table_df_transposed.T
    return table_df
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
    """Render the JSON table in json_file_name as a color-coded Excel
    spreadsheet at excel_file_name.

    Cells are conditionally colored by nucleotide (A/G/C/T), ambiguous IUPAC
    codes, N/gaps, and low map quality.  When annotation_dict is provided a
    row of feature annotations is appended first; otherwise a placeholder
    'no annotations' row is added.
    """
    # Disable pandas' default bold header format so our own formats apply.
    # Assigning the class attribute (rather than the removed module-level
    # `header_style` name) works on both old and current pandas.
    pandas.io.formats.excel.ExcelFormatter.header_style = None
    table_df = pandas.read_json(json_file_name, orient='split')
    if annotation_dict is not None:
        table_df = annotate_table(table_df, group, annotation_dict)
    else:
        # DataFrame.append was removed in pandas 2.0; concatenate an empty
        # all-NaN row named 'no annotations' instead.
        table_df = pandas.concat(
            [table_df, pandas.DataFrame(index=['no annotations'])])
    writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
    table_df.to_excel(writer, sheet_name='Sheet1')
    writer_book = writer.book
    ws = writer.sheets['Sheet1']
    # Cell formats: one background per nucleotide, plus ambiguous/low-quality
    # and N/gap highlighting.
    format_a = writer_book.add_format({'bg_color': '#58FA82'})
    format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
    format_c = writer_book.add_format({'bg_color': '#0000FF'})
    format_t = writer_book.add_format({'bg_color': '#FF0000'})
    format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
    formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
    format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
    format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
    rows, cols = table_df.shape
    # Wide first column for sample names, narrow columns for the SNP calls,
    # header rows/columns frozen while scrolling.
    ws.set_column(0, 0, 30)
    ws.set_column(1, cols, 2.1)
    ws.freeze_panes(2, 1)
    format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
    # Set last row.
    ws.set_row(rows + 1, cols + 1, format_annotation)
    # Make sure that row/column locations don't overlap.
    ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
    # Rotate the "reference:position" column headers vertically.
    format_rotation = writer_book.add_format({})
    format_rotation.set_rotation(90)
    for column_num, column_name in enumerate(list(table_df.columns)):
        ws.write(0, column_num + 1, column_name, format_rotation)
    format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
    # Set last row (tall, for the rotated annotation strings).
    ws.set_row(rows, 400, format_annotation)
    # ExcelWriter.save() was removed in pandas 2.0; close() writes the file.
    writer.close()
def get_annotation_dict(gbk_file):
    """Build a per-chromosome annotation lookup from a genbank file.

    Returns a dict mapping each chromosome id to a DataFrame of its
    CDS/rRNA features (chrom, start, stop, locus, product, gene), indexed
    by an IntervalIndex over [start, stop] so a position can be mapped to
    its enclosing feature.
    """
    gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
    annotation_dict = {}
    tmp_file = "features.csv"
    for chromosome in list(gbk_dict.keys()):
        # Create a file of this chromosome's features ('w' truncates any
        # previous chromosome's rows).
        with open(tmp_file, 'w') as fh:
            for feature in gbk_dict[chromosome].features:
                if "CDS" in feature.type or "rRNA" in feature.type:
                    try:
                        product = feature.qualifiers['product'][0]
                    except KeyError:
                        product = None
                    try:
                        locus = feature.qualifiers['locus_tag'][0]
                    except KeyError:
                        locus = None
                    try:
                        gene = feature.qualifiers['gene'][0]
                    except KeyError:
                        gene = None
                    fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
        # Read the chromosome's features file into a data frame.  This
        # processing must happen INSIDE the chromosome loop: doing it after
        # the loop (as before) captured only the last chromosome, leaving
        # every other chromosome unannotated.
        df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
        # Process the data: keep one feature per start position, preferring
        # rows that carry a gene name.
        df = df.sort_values(['start', 'gene'], ascending=[True, False])
        df = df.drop_duplicates('start')
        pro = df.reset_index(drop=True)
        pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
        annotation_dict[chromosome] = pro
    return annotation_dict
def get_base_file_name(file_path):
    """Return the file name of *file_path* stripped of its extension.

    When the name has no dot, fall back to dropping the final
    underscore-delimited token (the extension separator may have been
    rewritten as an underscore); otherwise return the bare name.
    """
    name = os.path.basename(file_path)
    if name.find(".") > 0:
        # Drop the dot extension.
        root, _ext = os.path.splitext(name)
        return root
    if name.find("_") > 0:
        # The dot extension was likely rewritten as an underscore:
        # discard the trailing token.
        pieces = name.split("_")
        return "_".join(pieces[:-1])
    return name
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
    # Append the MQ row to the cascade ordering and emit the table.
    combined = pandas.concat([cascade_order, mqdf], join='inner')
    output_table(combined, "cascade", group, annotation_dict)
def output_excel(df, type_str, group, annotation_dict, count=None):
    """Write *df* to a temporary json file and render it as an Excel table.

    The json file feeds excel_formatter; *group* (when available) and
    *count* (when chunking over-wide tables) are folded into the file
    names.
    """
    # Output the temporary json file that
    # is used by the excel_formatter.
    prefix = type_str if group is None else "%s_%s" % (group, type_str)
    if count is None:
        json_file_name = "%s_order_mq.json" % prefix
        excel_file_name = os.path.join(
            OUTPUT_EXCEL_DIR, "%s_table.xlsx" % prefix)
    else:
        json_file_name = "%s_order_mq_%d.json" % (prefix, count)
        excel_file_name = os.path.join(
            OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (prefix, count))
    df.to_json(json_file_name, orient='split')
    # Output the Excel file.
    excel_formatter(json_file_name, excel_file_name, group, annotation_dict)
def output_sort_table(cascade_order, mqdf, group, annotation_dict):
    """Emit the SNP table with columns reordered by genomic position."""
    transposed = cascade_order.T
    # Column labels look like "chrom:pos"; split out the integer position
    # so the rows can be sorted numerically.
    transposed['abs_value'] = transposed.index
    transposed[['chrom', 'pos']] = transposed['abs_value'].str.split(
        ':', expand=True)
    transposed = transposed.drop(['abs_value', 'chrom'], axis=1)
    transposed.pos = transposed.pos.astype(int)
    transposed = transposed.sort_values(by=['pos'])
    transposed = transposed.drop(['pos'], axis=1)
    sort_order_mq = pandas.concat([transposed.T, mqdf], join='inner')
    output_table(sort_order_mq, "sort", group, annotation_dict)
def output_table(df, type_str, group, annotation_dict):
    """Emit *df* as one or more Excel files, splitting on Excel's column cap."""
    if isinstance(group, str) and group.startswith("dataset"):
        # Inputs are single files, not collections, so auto-generated
        # input names are useless for labelling outputs.
        group_str = None
    else:
        group_str = group
    remaining = df.shape[1]
    if remaining < MAXCOLS:
        # Fits on a single sheet.
        output_excel(df, type_str, group_str, annotation_dict)
        return
    # Wider than Excel allows: emit MAXCOLS-wide chunks, then whatever
    # remains (possibly an empty final slice, matching legacy behaviour).
    chunk = 0
    start = 0
    while remaining >= MAXCOLS:
        chunk += 1
        output_excel(df.iloc[:, start:start + MAXCOLS], type_str,
                     group_str, annotation_dict, count=chunk)
        start += MAXCOLS
        remaining -= MAXCOLS
    chunk += 1
    output_excel(df.iloc[:, start:], type_str, group_str,
                 annotation_dict, count=chunk)
def preprocess_tables(task_queue, annotation_dict, timeout):
    """Worker loop: consume (newick, snps-json, avg-mq-json) triples from
    *task_queue* and write cascade- and position-sorted SNP tables for
    each, annotated via *annotation_dict*.

    Runs until the queue stays empty for *timeout* seconds.
    """
    while True:
        try:
            tup = task_queue.get(block=True, timeout=timeout)
        except queue.Empty:
            break
        newick_file, json_file, json_avg_mq_file = tup
        avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
        # Map quality to dataframe.
        mqdf = avg_mq_series.to_frame(name='MQ')
        mqdf = mqdf.T
        # Get the group.
        group = get_base_file_name(newick_file)
        snps_df = pandas.read_json(json_file, orient='split')
        # Derive the sample order from the newick tree: strip tree syntax
        # and branch-length numbers so only leaf names remain, one per line.
        # NOTE(review): assumes the newick file is a single line — only the
        # last line's parse is kept.
        with open(newick_file, 'r') as fh:
            for line in fh:
                line = re.sub('[:,]', '\n', line)
                line = re.sub('[)(]', '', line)
                line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
                line = re.sub('root\n', '', line)
        sample_order = line.split('\n')
        sample_order = list([_f for _f in sample_order if _f])
        sample_order.insert(0, 'root')
        tree_order = snps_df.loc[sample_order]
        # Count number of SNPs in each column.
        snp_per_column = []
        for column_header in tree_order:
            count = 0
            column = tree_order[column_header]
            for element in column:
                if element != column[0]:
                    count = count + 1
            snp_per_column.append(count)
        row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
        # Count number of SNPS from the
        # top of each column in the table.
        snp_from_top = []
        for column_header in tree_order:
            count = 0
            column = tree_order[column_header]
            # for each element in the column
            # skip the first element
            for element in column[1:]:
                if element == column[0]:
                    count = count + 1
                else:
                    break
            snp_from_top.append(count)
        row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
        tree_order = tree_order.append([row1])
        tree_order = tree_order.append([row2])
        # In pandas=0.18.1 even this does not work:
        # abc = row1.to_frame()
        # abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
        # tree_order.append(abc)
        # Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
        # Order columns by the two counts computed above (cascade order).
        tree_order = tree_order.T
        tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
        tree_order = tree_order.T
        # Remove snp_per_column and snp_from_top rows.
        cascade_order = tree_order[:-2]
        # Output the cascade table.
        output_cascade_table(cascade_order, mqdf, group, annotation_dict)
        # Output the sorted table.
        output_sort_table(cascade_order, mqdf, group, annotation_dict)
        task_queue.task_done()
def set_num_cpus(num_files, processes):
    """Choose a worker count bounded by file count, CPU count and request."""
    available = int(multiprocessing.cpu_count())
    if num_files < available and num_files < processes:
        # Fewer files than either bound: one worker per file suffices.
        return num_files
    if available < processes:
        # More workers requested than CPUs exist: cap at half the CPUs.
        half = int(available / 2)
        return num_files if num_files < half else half
    return processes
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')
    parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')
    parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')
    # Fixed: a stray trailing comma here previously wrapped this statement
    # in a no-op tuple.
    parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file')
    parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
    args = parser.parse_args()

    if args.gbk_file is not None:
        # Create the annotation_dict for annotating
        # the Excel tables.
        annotation_dict = get_annotation_dict(args.gbk_file)
    else:
        annotation_dict = None

    # The assumption here is that the list of files
    # in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are
    # named such that they are properly matched if
    # the directories contain more than 1 file (i.e.,
    # hopefully the newick file names and json file names
    # will be something like Mbovis-01D6_* so they can be
    # sorted and properly associated with each other).
    if args.input_newick is not None:
        newick_files = [args.input_newick]
    else:
        newick_files = [os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))
                        for file_name in sorted(os.listdir(INPUT_NEWICK_DIR))]
    if args.input_snps_json is not None:
        json_files = [args.input_snps_json]
    else:
        json_files = [os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))
                      for file_name in sorted(os.listdir(INPUT_JSON_DIR))]
    if args.input_avg_mq_json is not None:
        json_avg_mq_files = [args.input_avg_mq_json]
    else:
        json_avg_mq_files = [os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))
                             for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR))]

    multiprocessing.set_start_method('spawn')
    queue1 = multiprocessing.JoinableQueue()
    num_files = len(newick_files)
    # Fixed: --processes has no default, so set_num_cpus previously
    # compared None with an int and raised TypeError when it was omitted;
    # fall back to one worker per CPU.
    requested_processes = args.processes if args.processes is not None else multiprocessing.cpu_count()
    cpus = set_num_cpus(num_files, requested_processes)
    # Set a timeout for get()s in the queue.
    timeout = 0.05

    # Queue one task per matched (newick, snps-json, avg-mq-json) triple.
    for i, newick_file in enumerate(newick_files):
        json_file = json_files[i]
        json_avg_mq_file = json_avg_mq_files[i]
        queue1.put((newick_file, json_file, json_avg_mq_file))

    # Complete the preprocess_tables task.
    processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    queue1.join()
    if queue1.empty():
        queue1.close()
        queue1.join_thread()
| mit |
sunzuolei/robot | Codes.python/P2/P2.py | 1 | 1166 | #-------by HYH -------#
import numpy as np
import matplotlib.pyplot as plt
# Colour of each cell along the 1-D corridor the robot senses.
world=np.array(['green','red','red','green','green'])
# Uniform prior over the five positions.
p=np.array([0.2,0.2,0.2,0.2,0.2])
# The sensor measurement to incorporate.
z='green'
# Probability the sensor reading matches the true colour.
pHit=0.6
# Probability of a mismatched (false) colour reading.
pMiss=0.2
def plot_P2():
    """Plot one Bayes-filter measurement update as four stacked bar charts:
    prior, likelihood, unnormalized posterior and normalized posterior.

    Uses the module-level globals ``world``, ``p``, ``z``, ``pHit`` and
    ``pMiss``.
    """
    plt.figure(figsize=(8, 10), dpi=80)
    # Prior belief over positions.
    plt.subplot(411)
    plt.bar(x=(0, 1, 2, 3, 4), height=p, color='red')
    plt.xlabel('Position')
    # Fixed typo in the axis labels: "positon" -> "position".
    plt.ylabel('Probability of being at the position')
    plt.title('The Prior distribution')
    # Likelihood of measurement z at each position.
    plt.subplot(412)
    q = np.zeros(np.size(p))
    for i in range(len(p)):
        hit = 1 if z == world[i] else 0
        q[i] = hit * pHit + (1 - hit) * pMiss
    plt.bar(x=(0, 1, 2, 3, 4), height=q, color='blue')
    plt.xlabel('Position')
    plt.ylabel('Likelihood')
    plt.title('Observation model')
    # Unnormalized posterior: prior times likelihood.
    plt.subplot(413)
    q = [p[i] * q[i] for i in range(len(p))]
    plt.bar(x=(0, 1, 2, 3, 4), height=q, color='yellow')
    plt.xlabel('Position')
    plt.ylabel('Probability of being at the position')
    # Fixed: this subplot previously shared its title with the normalized
    # posterior below, making the two indistinguishable.
    plt.title('The Posterior distribution (unnormalized)')
    # Normalized posterior.
    plt.subplot(414)
    q = q / sum(q)
    plt.bar(x=(0, 1, 2, 3, 4), height=q, color='green')
    plt.xlabel('Position')
    plt.ylabel('Probability of being at the position')
    plt.title('The Posterior distribution')
    plt.subplots_adjust(hspace=0.8)
    plt.show()
plot_P2() | mit |
luis-rr/nest-simulator | topology/examples/test_3d_exp.py | 13 | 2956 | # -*- coding: utf-8 -*-
#
# test_3d_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Interactive plotting so figures update as the script runs.
pylab.ion()

nest.ResetKernel()

# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
        random.uniform(-0.5, 0.5)]
       for j in range(1000)]

l1 = topo.CreateLayer(
    {'extent': [1.5, 1.5, 1.5],  # must specify 3d extent AND center
     'center': [0., 0., 0.],
     'positions': pos,
     'elements': 'iaf_psc_alpha'})

# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']

# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.GetChildren(l1)[0]

# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')

# Exponential-kernel connections in full volume [-0.75,0.75]**3
# (the kernel below is 'exponential', not Gaussian).
topo.ConnectLayers(l1, l1,
                   {'connection_type': 'divergent', 'allow_autapses': False,
                    'mask': {'volume': {'lower_left': [-0.75, -0.75, -0.75],
                                        'upper_right': [0.75, 0.75, 0.75]}},
                    'kernel': {'exponential':
                               {'c': 0., 'a': 1., 'tau': 0.25}}})

# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')

# Histogram of distances from the center element to its targets.
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
| gpl-2.0 |
duncanmmacleod/gwsumm | gwsumm/tests/test_config.py | 1 | 8714 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Tests for :mod:`gwsumm.config`
"""
import os.path
import tempfile
from io import StringIO
from collections import OrderedDict
from configparser import (DEFAULTSECT, ConfigParser)
import pytest
from matplotlib import rcParams
from astropy import units
from gwsumm import (state, config, html)
from gwsumm.channels import get_channel
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
# In-memory configuration exercised by the tests below: a [DEFAULT]
# option (to test non-default filtering), a plain [section], plugin and
# unit loading hooks, and a %(IFO)s header for section-name
# interpolation.
TEST_CONFIG = StringIO("""
[DEFAULT]
defaultoption = defaultvalue
[section]
option1 = value1
option2 = True
option3 = 4
[plugins]
tempfile = ''
[units]
myunit = meter
cochrane = dimensionless
[%(IFO)s]
""")
def assert_configparser_equal(a, b):
    """Assert that two config parsers hold identical options everywhere."""
    sections = {DEFAULTSECT}
    sections.update(a.sections())
    sections.update(b.sections())
    for sect in sections:
        assert list(a.items(sect)) == list(b.items(sect))
class TestGWSummConfigParser(object):
    """Unit tests for `gwsumm.config.GWSummConfigParser`."""

    # the parser class under test
    PARSER = config.GWSummConfigParser

    @classmethod
    def new(cls):
        """Return a parser freshly populated from ``TEST_CONFIG``.

        The shared stream is rewound before and after reading so it can
        be reused by other tests.
        """
        TEST_CONFIG.seek(0)
        cp = cls.PARSER()
        cp.read_file(TEST_CONFIG)
        TEST_CONFIG.seek(0)
        return cp

    @classmethod
    @pytest.fixture()
    def cnfg(cls):
        """Pytest fixture: a fresh parser per test."""
        return cls.new()

    # -- test creation --------------------------

    def test_init(self):
        # option names keep their case; sections keep insertion order
        cp = self.new()
        assert cp.optionxform is str
        assert cp._dict is OrderedDict

    # -- test methods ---------------------------

    def test_ndoptions(self, cnfg):
        # ndoptions excludes options inherited from [DEFAULT]
        ndopts = cnfg.ndoptions('section')
        assert isinstance(ndopts, list)
        assert 'defaultoption' not in ndopts

    def test_nditems(self, cnfg):
        # nditems excludes (option, value) pairs inherited from [DEFAULT]
        nditms = cnfg.nditems('section')
        assert isinstance(nditms, list)
        assert ('defaultoption', 'defaultvalue') not in nditms

    def test_read(self):
        cp = self.new()
        # read config from file
        with tempfile.NamedTemporaryFile(mode='w') as f:
            f.write(TEST_CONFIG.read())
            TEST_CONFIG.seek(0)  # rewind for other users
            read_ = cp.read(f.name)
            assert read_ == [f.name]
            assert cp.files == [os.path.abspath(f.name)]
        # check error gets raised when file isn't read
        with pytest.raises(IOError):
            cp.read('does-not-exist.ini')

    def test_from_configparser(self, cnfg):
        # check that GWSummConfigParser gets returned as is
        copy = self.PARSER.from_configparser(cnfg)
        assert copy is cnfg
        # check that base ConfigParser gets converted to GWSummConfigParser
        cp = ConfigParser()
        try:
            cp.read_file(TEST_CONFIG)
        except AttributeError:
            # older ConfigParser spelling
            cp.readfp(TEST_CONFIG)
        TEST_CONFIG.seek(0)
        copy = self.PARSER.from_configparser(cp)
        assert isinstance(copy, self.PARSER)
        print(list(copy.sections()))
        print(list(cnfg.sections()))
        assert_configparser_equal(copy, cnfg)

    def test_interpolate_section_names(self, cnfg):
        # interpolation rewrites the %(IFO)s section header in place
        assert 'X1' not in cnfg.sections()
        assert '%(IFO)s' in cnfg.sections()
        cnfg.interpolate_section_names(IFO='X1')
        assert 'X1' in cnfg.sections()
        assert '%(IFO)s' not in cnfg.sections()

    @pytest.mark.parametrize('ifo, obs, exp', [
        ('L1', None, 'LIGO Livingston'),
        ('X1', 'Einstein Telescope', 'Einstein Telescope'),
    ])
    def test_set_ifo_options(self, ifo, obs, exp):
        # IFO options are stored in both upper- and lower-case variants
        cp = self.new()
        cp.set_ifo_options(ifo, observatory=obs)
        assert cp.get(DEFAULTSECT, 'IFO') == ifo.upper()
        assert cp.get(DEFAULTSECT, 'ifo') == ifo.lower()
        assert cp.get(DEFAULTSECT, 'SITE') == ifo[0].upper()
        assert cp.get(DEFAULTSECT, 'site') == ifo[0].lower()
        assert cp.get(DEFAULTSECT, 'observatory') == exp

    def test_set_date_options(self):
        # GPS start/end expand into derived defaults (year, duration)
        cp = self.new()
        cp.set_date_options(0, 100)
        assert cp.get(DEFAULTSECT, 'gps-start-time') == '0'
        assert cp.get(DEFAULTSECT, 'gps-end-time') == '100'
        assert cp.get(DEFAULTSECT, 'yyyy') == '1980'
        assert cp.get(DEFAULTSECT, 'duration') == '100'

    def test_load_rcParams(self):
        # check empty config doesn't cause havoc
        cp = self.PARSER()
        assert cp.load_rcParams() == {}
        cp = self.new()
        cp.add_section('rcParams')
        cp.set('rcParams', 'axes.labelsize', '100')
        new = cp.load_rcParams()
        assert new == {'axes.labelsize': 100}
        assert rcParams['axes.labelsize'] == 100

    def test_load_states(self):
        cp = self.new()
        cp.set_date_options(0, 100)
        cp.add_section('states')
        cp.set('states', 'locked', 'X1:TEST-STATE:1')
        cp.load_states()
        states = state.get_states()
        # the configured state plus the implicit 'All' state
        assert len(states) == 2
        assert 'locked' in states
        assert states['locked'].definition == 'X1:TEST-STATE:1'
        assert state.ALLSTATE in states

    def test_load_plugins(self, cnfg):
        # check that empty section doesn't cause havoc
        cp = self.PARSER()
        assert cp.load_plugins() == []
        # check plugins get loaded
        plugins = cnfg.load_plugins()
        assert plugins == [tempfile]

    def test_load_units(self, cnfg):
        # check that empty section doesn't cause havoc
        cp = self.PARSER()
        assert cp.load_units() == []
        newunits = cnfg.load_units()
        assert newunits == [units.meter, units.dimensionless_unscaled]

    def test_load_channels(self):
        # test simple channel section
        cp = self.PARSER()
        cp.add_section('X1:TEST-CHANNEL')
        cp.set('X1:TEST-CHANNEL', 'frametype', 'X1_TEST')
        cp.load_channels()
        c = get_channel('X1:TEST-CHANNEL')
        assert c.frametype == 'X1_TEST'
        # test with interpolation
        cp.set(DEFAULTSECT, 'IFO', 'X1')
        cp.add_section('%(IFO)s:TEST-CHANNEL_2')
        cp.set('%(IFO)s:TEST-CHANNEL_2', 'resample', '128')
        cp.interpolate_section_names(IFO='X1')
        cp.load_channels()
        c = get_channel('X1:TEST-CHANNEL_2')
        assert c.resample == 128
        # test bit parsing
        cp.set('X1:TEST-CHANNEL', '0', 'Bit 0')
        cp.set('X1:TEST-CHANNEL', '1', 'A_B')
        cp.load_channels()
        c = get_channel('X1:TEST-CHANNEL')
        assert c.bits == ['Bit 0', 'A_B']
        # test channels section
        cp.add_section('channels-test')
        cp.set('channels-test', 'channels',
               'X1:TEST-CHANNEL,X1:TEST-CHANNEL_2')
        cp.set('channels-test', 'unit', 'urad')
        cp.load_channels()
        assert c.unit == units.microradian

    def test_finalize(self):
        # smoke test: finalize() should run cleanly on a dated config
        cp = self.new()
        cp.set_date_options(0, 100)
        cp.finalize()

    def test_get_css(self):
        # check empty result returns defaults
        cp = self.PARSER()
        css = cp.get_css()
        assert css == list(html.get_css().values())
        # check overrides
        cp.add_section('html')
        cp.set('html', 'bootstrap-css', 'test.css')
        css = cp.get_css()
        print(css)
        assert 'test.css' in css
        assert html.get_css()['bootstrap'] not in css
        # check custom files
        cp.set('html', 'extra-css', '"extra.css","/static/extra2.css"')
        css = cp.get_css()
        assert 'test.css' in css  # still finds overrides
        assert 'extra.css' in css and '/static/extra2.css' in css

    def test_get_javascript(self):
        # check empty result returns defaults
        cp = self.PARSER()
        js = cp.get_javascript()
        assert js == list(html.get_js().values())
        # check overrides
        cp.add_section('html')
        cp.set('html', 'bootstrap-js', 'test.js')
        js = cp.get_javascript()
        assert 'test.js' in js
        assert html.get_js()['bootstrap'] not in js
        # check custom files
        cp.set('html', 'extra-js', '"extra.js","/static/extra2.js"')
        js = cp.get_javascript()
        assert 'test.js' in js  # still finds overrides
        assert 'extra.js' in js and '/static/extra2.js' in js
| gpl-3.0 |
untom/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils build configuration for sklearn."""
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info, BlasNotFoundError
    import numpy

    libraries = []
    if os.name == 'posix':
        libraries.append('m')

    config = Configuration('sklearn', parent_package, top_path)

    # Pure-python subpackages (and their test suites), in build order.
    for subpackage in ('__check_build',
                       'svm',
                       'datasets',
                       'datasets/tests',
                       'feature_extraction',
                       'feature_extraction/tests',
                       'cluster',
                       'cluster/tests',
                       'covariance',
                       'covariance/tests',
                       'cross_decomposition',
                       'decomposition',
                       'decomposition/tests',
                       'ensemble',
                       'ensemble/tests',
                       'feature_selection',
                       'feature_selection/tests',
                       'utils',
                       'utils/tests',
                       'externals',
                       'mixture',
                       'mixture/tests',
                       'gaussian_process',
                       'gaussian_process/tests',
                       'neighbors',
                       'neural_network',
                       'preprocessing',
                       'manifold',
                       'metrics',
                       'semi_supervised',
                       'tree',
                       'tree/tests',
                       'metrics/tests',
                       'metrics/cluster',
                       'metrics/cluster/tests'):
        config.add_subpackage(subpackage)

    # add cython extension module for isotonic regression
    config.add_extension(
        '_isotonic',
        sources=['_isotonic.c'],
        include_dirs=[numpy.get_include()],
        libraries=libraries,
    )

    # some libs needs cblas, fortran-compiled BLAS will not be sufficient
    blas_info = get_info('blas_opt', 0)
    if (not blas_info) or (
            ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
        config.add_library('cblas',
                           sources=[join('src', 'cblas', '*.c')])
        warnings.warn(BlasNotFoundError.__doc__)

    # the following packages depend on cblas, so they have to be build
    # after the above.
    config.add_subpackage('linear_model')
    config.add_subpackage('utils')

    # add the test directory
    config.add_subpackage('tests')

    return config
if __name__ == '__main__':
    # Standalone build: hand the assembled configuration to
    # numpy.distutils with this directory as the top-level path.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
cshallue/models | research/lexnet_nc/learn_path_embeddings.py | 3 | 7726 | #!/usr/bin/env python
# Copyright 2017, 2018 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the LexNET path-based model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import lexnet_common
import path_model
from sklearn import metrics
import tensorflow as tf
# Command-line flags: dataset/corpus locations, pre-trained embedding
# directory, and where to write model output.
tf.flags.DEFINE_string(
    'dataset_dir', 'datasets',
    'Dataset base directory')
tf.flags.DEFINE_string(
    'dataset',
    'tratz/fine_grained',
    'Subdirectory containing the corpus directories: '
    'subdirectory of dataset_dir')
tf.flags.DEFINE_string(
    'corpus', 'random/wiki_gigawords',
    'Subdirectory containing the corpus and split: '
    'subdirectory of dataset_dir/dataset')
tf.flags.DEFINE_string(
    'embeddings_base_path', 'embeddings',
    'Embeddings base directory')
tf.flags.DEFINE_string(
    'logdir', 'logdir',
    'Directory of model output files')

FLAGS = tf.flags.FLAGS
def main(_):
    """Train the LexNET path-based model and save its path embeddings.

    Loads the train/val/test tfrecord splits and the word embeddings,
    trains with early stopping on validation F1, keeps the best
    checkpoint, then computes and saves path embeddings for all splits.
    """
    # Pick up any one-off hyper-parameters.
    hparams = path_model.PathBasedModel.default_hparams()

    # Set the number of classes
    classes_filename = os.path.join(
        FLAGS.dataset_dir, FLAGS.dataset, 'classes.txt')

    with open(classes_filename) as f_in:
        classes = f_in.read().splitlines()

    hparams.num_classes = len(classes)
    print('Model will predict into %d classes' % hparams.num_classes)

    # Get the datasets
    train_set, val_set, test_set = (
        os.path.join(
            FLAGS.dataset_dir, FLAGS.dataset, FLAGS.corpus,
            filename + '.tfrecs.gz')
        for filename in ['train', 'val', 'test'])

    print('Running with hyper-parameters: {}'.format(hparams))

    # Load the instances
    print('Loading instances...')
    opts = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    train_instances = list(tf.python_io.tf_record_iterator(train_set, opts))
    val_instances = list(tf.python_io.tf_record_iterator(val_set, opts))
    test_instances = list(tf.python_io.tf_record_iterator(test_set, opts))

    # Load the word embeddings
    print('Loading word embeddings...')
    lemma_embeddings = lexnet_common.load_word_embeddings(
        FLAGS.embeddings_base_path, hparams.lemma_embeddings_file)

    # Define the graph and the model
    with tf.Graph().as_default():
        # Training model: reads shuffled instances from the tfrecord file.
        with tf.variable_scope('lexnet'):
            options = tf.python_io.TFRecordOptions(
                compression_type=tf.python_io.TFRecordCompressionType.GZIP)
            reader = tf.TFRecordReader(options=options)
            _, train_instance = reader.read(
                tf.train.string_input_producer([train_set]))
            shuffled_train_instance = tf.train.shuffle_batch(
                [train_instance],
                batch_size=1,
                num_threads=1,
                capacity=len(train_instances),
                min_after_dequeue=100,
            )[0]
            train_model = path_model.PathBasedModel(
                hparams, lemma_embeddings, shuffled_train_instance)

        # Validation model: shares weights, fed instances via placeholder.
        with tf.variable_scope('lexnet', reuse=True):
            val_instance = tf.placeholder(dtype=tf.string)
            val_model = path_model.PathBasedModel(
                hparams, lemma_embeddings, val_instance)

        # Initialize a session and start training
        logdir = (
            '{logdir}/results/{dataset}/path/{corpus}/supervisor.logdir'.format(
                logdir=FLAGS.logdir, dataset=FLAGS.dataset, corpus=FLAGS.corpus))
        best_model_saver = tf.train.Saver()

        # Track the best validation F1 seen so far inside the graph so it
        # survives supervisor-managed restarts.
        f1_t = tf.placeholder(tf.float32)
        best_f1_t = tf.Variable(0.0, trainable=False, name='best_f1')
        assign_best_f1_op = tf.assign(best_f1_t, f1_t)

        supervisor = tf.train.Supervisor(
            logdir=logdir,
            global_step=train_model.global_step)
        with supervisor.managed_session() as session:
            # Load the labels
            print('Loading labels...')
            val_labels = train_model.load_labels(session, val_instances)

            save_path = '{logdir}/results/{dataset}/path/{corpus}/'.format(
                logdir=FLAGS.logdir,
                dataset=FLAGS.dataset,
                corpus=FLAGS.corpus)

            # Train the model
            print('Training the model...')
            while True:
                step = session.run(train_model.global_step)
                epoch = (step + len(train_instances) - 1) // len(train_instances)
                if epoch > hparams.num_epochs:
                    break

                print('Starting epoch %d (step %d)...' % (1 + epoch, step))

                epoch_loss = train_model.run_one_epoch(session, len(train_instances))

                best_f1 = session.run(best_f1_t)
                f1 = epoch_completed(val_model, session, epoch, epoch_loss,
                                     val_instances, val_labels, best_model_saver,
                                     save_path, best_f1)

                if f1 > best_f1:
                    session.run(assign_best_f1_op, {f1_t: f1})

                # Early stopping once F1 drops well below the best so far.
                # Fixed typo: this previously called the non-existent
                # tf.logging.fino, which raised AttributeError here.
                if f1 < best_f1 - 0.08:
                    tf.logging.info('Stopping training after %d epochs.\n' % epoch)
                    break

            # Print the best performance on the validation set
            best_f1 = session.run(best_f1_t)
            print('Best performance on the validation set: F1=%.3f' % best_f1)

            # Save the path embeddings
            print('Computing the path embeddings...')
            instances = train_instances + val_instances + test_instances
            path_index, path_vectors = path_model.compute_path_embeddings(
                val_model, session, instances)
            path_emb_dir = '{dir}/path_embeddings/{dataset}/{corpus}/'.format(
                dir=FLAGS.embeddings_base_path,
                dataset=FLAGS.dataset,
                corpus=FLAGS.corpus)
            if not os.path.exists(path_emb_dir):
                os.makedirs(path_emb_dir)
            path_model.save_path_embeddings(
                val_model, path_vectors, path_index, path_emb_dir)
def epoch_completed(model, session, epoch, epoch_loss,
                    val_instances, val_labels, saver, save_path, best_f1):
    """Hook run after every training epoch.

    Reports validation-set precision/recall/F1 and checkpoints the model
    whenever it beats the best F1 seen so far.

    Args:
      model: The currently trained path-based model.
      session: The current TensorFlow session.
      epoch: The epoch number.
      epoch_loss: The current epoch loss.
      val_instances: The validation set instances (evaluation between epochs).
      val_labels: The validation set labels (for evaluation between epochs).
      saver: tf.Saver object
      save_path: Where to save the model.
      best_f1: the best F1 achieved so far.

    Returns:
      The F1 achieved on the validation set.
    """
    # Evaluate on the validation set
    predictions = model.predict(session, val_instances)
    precision, recall, f1, _ = metrics.precision_recall_fscore_support(
        val_labels, predictions, average='weighted')
    print(
        'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
            epoch + 1, model.hparams.num_epochs, epoch_loss,
            precision, recall, f1))

    if f1 > best_f1:
        # New best: checkpoint the model.
        checkpoint = save_path + 'best.ckpt'
        print('Saving model in: %s' % checkpoint)
        saver.save(session, checkpoint)
        print('Model saved in file: %s' % checkpoint)

    return f1
if __name__ == '__main__':
tf.app.run(main)
| apache-2.0 |
biokit/biokit | biokit/rtools/pyper.py | 1 | 18769 | #!/usr/bin/env python
"""FORK of PyperR version 1.1.2 from pypi
Reason for the fork. Code are written on top of pyper inside biokit.
Yet, PypeR is failing sometimes (see examples below) and we may need to
hack it. It had also Python 3 compat issues.
Examples that fail:
installed.packages() --> numpy.array failing
packageVersion("CellNOptR") --> special unicode not interpreted
The get() function fails with S4 objects such as CNOlist
We return None to avoid the failure::
else if (is.object(x)) 'None'
----------------------------------------------------------------------------
PypeR (PYthon-piPE-R)
PypeR is free software subjected to the GPL license 3.0. and comes with
ABSOLUTELY NO WARRANTY. This package provides a light-weight interface to use R
in Python by pipe. It can be used on multiple platforms since it is written in
pure python.
Usage:
The usage of this packages is very simple. Examples are presented in the
file "test.py" in the distribution package.
PypeR provide a class "R" to wrap the R language. An instance of the R
class is used to manage an R process. Different instances can use different
R installations. On POSIX systems (including the Cygwin environment on
Windows), it is even possible to use an R installed on a remote computer.
Basically, there are four ways to use an instance of the R class.
1. Use the methods of the instance
methods include:
run:This method is used to pass an R command string to the R process,
the return value is a string - the standard output from R. Note
that the return value usually includes the R expression (a
series of R codes) themselves and the output of the R
expression. If the real result value is wanted, use the
function "get" instead.
assign: Assign a value to an R variable. No return value.
get: Get the result of an R expression.
remove: Remove a R variable.
2. Call the instance as a function
The instance is callable. If called as a function, it behaves just
same as its "run" method.
3. Use the instance as a Python dictionary
The instance can mimic some operations on a python dictionary,
typically, to assign values to R variables, to retrieve values for any
R expression, or delete an R variable. These two operations do same
jobs as the methods "assign", "get", and "remove".
4. Access R variables as if they are the attributes of the instance.
If the variable name cannot be found in the instance or its class, the
instance will try to get/set/remove it in R. This way is similar to 3,
but with more limitations, e.g., the R variable name cannot contain any
DOT (.)
Considering that any code block in R is an expression, the "get" method (or
the form of retrieving values from a dictionary) can be used to run a
number of R commands with the final result returned.
Note that PypeR does NOT validate/convert a variable name when passing it to R.
If a variable name with a leading underscore ("_"), although legal in
Python, is passed to R, an RError will be raised.
Conversions:
Python -> R
None -> NULL, NaN -> NaN, Inf -> Inf
R -> Python (numpy)
NULL -> None, NA -> None, NaN -> None (NaN), Inf -> None (Inf)
DEBUG model:
Since the child process (R) can be easily killed by any occasional error in
the codes passed to it, PypeR is set to "DEBUG" model by default. This
means that any code blocks send to R will be wrapped in the function
"try()", which will prevent R from crashing. To disable the "DEBUG" model,
the user can simple set the variable "DEBUG_MODE" in the R class or in its
instance to False.
To model the behavior of the "get" method of a Python dictionary, the
method "get" allows wild values for variables that does not exists in R.
Then the R expression will always be wrapped in "try()" to avoid R crashing
if the method "get" is called.
"""
import os
import sys
import time
import re
import tempfile
from types import * # need to clean that
import subprocess
from biokit.rtools.r4python import r4python
from biokit.rtools.py2r import *
# Even though there are not used in this module, these two imports
# are required for the RSession to properly interpret the strings
# as objects
import numpy
import pandas
# --- Python 2 / Python 3 compatibility shims -------------------------------
# _mybytes/_mystr convert between str and bytes at the pipe boundary; on
# Python 2 both are identity functions.
# NOTE(review): lexicographic version comparison — fine for distinguishing
# 2.x from 3.x, which is all that is needed here.
if sys.version < '3.0':
    _mystr = _mybytes = lambda s: s
    _in_py3 = False
else:
    from functools import reduce
    # re-create the Python 2 names that the rest of this module still uses
    long, basestring, unicode = int, str, str
    _mybytes = lambda s: bytes(s, 'utf8') # 'ascii')
    _mystr = lambda s: str(s, 'utf8')
    _in_py3 = True
# NOTE(review): _has_subp is forced False even though subprocess is imported
# above, so the STARTUPINFO branch in R.__init__ can never run — presumably a
# deliberate simplification of the fork; confirm.
_has_subp = False
PIPE, _STDOUT = None, None
def Popen(CMD, *a, **b):
    """Launch *CMD* with stdin/stdout pipes and stderr merged into stdout.

    Replacement for the original pyper ``os.popen4`` call, which does not
    exist on Python 3.  Extra positional/keyword arguments are accepted for
    call-site compatibility but deliberately ignored.
    """
    pipe_options = dict(
        shell=False,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge stderr into the stdout stream
        close_fds=True,
    )
    return subprocess.Popen(CMD, **pipe_options)
def sendAll(p, s):
    """Encode *s* if necessary and push it down the child's stdin pipe.

    ``_mybytes`` is a no-op on Python 2 and a utf-8 encode on Python 3.
    """
    payload = _mybytes(s)
    p.stdin.write(payload)
    # the R child only sees the command once the pipe buffer is flushed
    p.stdin.flush()
def readLine(p, dump_stdout=False, *a, **b):
    """Read one decoded line from the child process's stdout.

    When *dump_stdout* is true the line is also echoed to sys.stdout, which
    is useful for long-running R jobs.  Extra arguments are ignored.
    """
    line = _mystr(p.stdout.readline())
    if dump_stdout:
        sys.stdout.write(line)
        sys.stdout.flush()
    return line
class RError(Exception):
    """Raised for R-side failures or illegal R variable names."""

    def __init__(self, value):
        # keep the offending payload around so callers can inspect it
        self.value = value

    def __str__(self):
        return repr(self.value)
class R(object):
    # "del r.XXX" fails on FePy-r7 (IronPython 1.1 on .NET 2.0.50727.42) if using old-style class
    """A Python class to enclose an R process.

    The instance owns a child R interpreter connected through pipes; commands
    are written to its stdin and the echoed output is read back from stdout
    (see __runOnce).  Both dictionary-style (r['x']) and attribute-style
    (r.x) access are forwarded to R variables.
    """
    __Rfun = r4python      # R bootstrap code sent to the child at start-up
    _DEBUG_MODE = True     # when True, commands are wrapped in R's try()

    def __init__(self, RCMD='R', max_len=10000, use_dict=None,
                 host='localhost', user=None, ssh='ssh', return_err=True, dump_stdout=False,
                 verbose=False, options=('--quiet', '--no-save', '--no-restore')):
        '''
        RCMD: The name of a R interpreter, path information should be included
            if it is not in the system search path.
        use_dict: A R named list will be returned as a Python dictionary if
            "use_dict" is True, or a list of tuples (name, value) if "use_dict"
            is False. If "use_dict" is None, the return value will be a
            dictionary if there is no replicated names, or a list if replicated
            names found.
        host: The computer name (or IP) on which the R interpreter is
            installed. The value "localhost" means that R locates on the the
            localhost computer. On POSIX systems (including Cygwin environment
            on Windows), it is possible to use R on a remote computer if the
            command "ssh" works. To do that, the user needs to set this value,
            and perhaps the parameter "user".
        user: The user name on the remote computer. This value needs to be set
            only if the user name on the remote computer is different from the
            local user. In interactive environment, the password can be input
            by the user if prompted. If running in a program, the user needs to
            be able to login without typing password!
        ssh: The program to login to remote computer.
        return_err: redirect stderr to stdout
        dump_stdout:
            prints output from R directly to sys.stdout, useful for long running
            routines which print progress during execution.
        '''
        # use self.__dict__.update to register variables since __setattr__ is
        # used to set variables for R. tried to define __setattr in the class,
        # and change it to __setattr__ for instances at the end of __init__,
        # but it seems failed.
        # -- maybe this only failed in Python2.5? as warned at
        # http://wiki.python.org/moin/NewClassVsClassicClass:
        # "Warning: In 2.5, magic names (typically those with a double
        # underscore (DunderAlias) at both ends of the name) may look at the
        # class rather than the instance even for old-style classes."
        self.__dict__.update({
            'prog': None,
            'Rfun': self.__class__.__Rfun,
            'Rexecutable': RCMD,
            'max_len': max_len,
            'use_dict': use_dict,
            'verbose': verbose,
            'dump_stdout': dump_stdout,
            'localhost': host == 'localhost',
            'newline': sys.platform == 'win32' and '\r\n' or '\n',
            'sendAll' : sendAll # keep a reference to the global function "sendAll" which will be used by __del__
            })
        # Build the child command line; for a remote host, prefix with ssh.
        RCMD = [RCMD]
        if not self.localhost:
            RCMD.insert(0, host)
            if user:
                RCMD.insert(0, '-l%s' % user)
            RCMD.insert(0, ssh)
        for arg in options:
            if arg not in RCMD:
                RCMD.append(arg)
        # NOTE(review): _has_subp is hard-coded False at module level, so this
        # Windows STARTUPINFO branch is effectively dead code in this fork.
        if _has_subp and hasattr(subprocess, 'STARTUPINFO'):
            info = subprocess.STARTUPINFO()
            try:
                if hasattr(subprocess, '_subprocess'):
                    info.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
                    info.wShowWindow = subprocess._subprocess.SW_HIDE
                else:
                    info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                    info.wShowWindow = subprocess.SW_HIDE
            except:
                info = None
        else:
            info = None
        # create stderr to replace None for py2exe:
        # http://www.py2exe.org/index.cgi/Py2ExeSubprocessInteractions
        if sys.platform != 'win32':
            childstderr = None
        else:
            if hasattr(sys.stderr, 'fileno'):
                childstderr = sys.stderr
            elif hasattr(sys.stderr, '_file') and hasattr(sys.stderr._file, 'fileno'):
                childstderr = sys.stderr._file
            else: # Give up and point child stderr at nul
                childstderr = file('nul', 'a')
        # Remember the launch parameters so reconnect() can respawn R after a
        # broken pipe (e.g. CTRL+C killed the child).
        from easydev import AttrDict
        self.__dict__['subprocess_args'] = AttrDict(**{
            'RCMD': RCMD,
            'PIPE': PIPE,
            'stderr': return_err and _STDOUT or childstderr,
            'info': info})
        self.reconnect()

    def reconnect(self):
        """TC: Nov 2014
        If CTRL+C is called, the pipe is broken, in which case, reconnecting
        should be called.
        """
        args = self.subprocess_args
        RCMD = args['RCMD']
        PIPE = args['PIPE']
        stderr = args['stderr']
        info = args['info']
        # NOTE(review): the module-level Popen wrapper ignores these keyword
        # arguments and always uses its own PIPE settings — confirm intended.
        self.__dict__['prog'] = Popen(RCMD, stdin=PIPE, stdout=PIPE,
            stderr=stderr, startupinfo=info)
        # Bootstrap the child with the helper R functions (.getRvalue4Python__ etc.).
        self.__call__(self.Rfun)

    def __runOnce(self, CMD, use_try=None):
        """CMD: a R command string

        Sends one command to the child R process and reads stdout until a
        unique, time-stamped sentinel line is echoed back, then strips the
        sentinel and returns the accumulated output.
        """
        use_try = use_try or self._DEBUG_MODE
        newline = self.newline
        # Unique sentinel so we can detect where this command's output ends.
        tail_token = 'R command at time: %s' % repr(time.time())
        # tail_token_r = re.sub(r'[\(\)\.]', r'\\\1', tail_token)
        tail_cmd = 'print("%s")%s' % (tail_token, newline)
        tail_token = tail_token.replace(' ', '\\s').replace('.', '\\.').replace('+', '\\+')
        re_tail = re.compile(r'>\sprint\("%s"\)\r?\n\[1\]\s"%s"\r?\n$' % (tail_token, tail_token))
        if len(CMD) <= self.max_len or not self.localhost:
            # Short command: send it straight down the pipe.
            fn = None
            CMD = (use_try and 'try({%s})%s%s' or '%s%s%s') % (CMD.replace('\\', '\\\\'),
                newline, tail_cmd)
        else:
            # Long command: write it to a temp file and have R source() it,
            # then remove the file from the R side.
            fh, fn = tempfile.mkstemp()
            os.fdopen(fh, 'wb').write(_mybytes(CMD))
            if sys.platform == 'cli':
                os.close(fh) # this is necessary on IronPython
            fn = fn.replace('\\', '/')
            params = {'fn':fn , 'newline':newline, 'tail_cmd':tail_cmd}
            if use_try is True:
                CMD = 'try({source("%(fn)s")})%(newline)s ' % params
                CMD += 'dummy=file.remove(%(fn)r)%(newline)s%(tail_cmd)s' % params
            else:
                CMD = '({source("%(fn)s")})%(newline)s ' % params
                CMD += 'dummy=file.remove(%(fn)r)%(newline)s%(tail_cmd)s' % params
        try:
            self.sendAll(self.prog, CMD)
        except IOError:
            # Broken pipe (dead child): respawn R once and retry.
            if self.verbose:
                print("PIPE was broken. Reconnecting...")
            self.reconnect()
            self.sendAll(self.prog, CMD)
        except Exception as err:
            print("Failed")
            raise err
        rlt = ''
        # Accumulate stdout until the sentinel appears at the end of rlt.
        while not re_tail.search(rlt):
            try:
                rltonce = readLine(self.prog, dump_stdout=self.dump_stdout)
                if rltonce:
                    rlt = rlt + rltonce
            except:
                break
        else:
            # while-else: only reached when the sentinel was found (no break);
            # strip the sentinel and the leading R prompt.
            rlt = re_tail.sub('', rlt)
            if rlt.startswith('> '):
                rlt = rlt[2:]
        return(rlt)

    def __call__(self, CMDS=[], use_try=None):
        """Run a (list of) R command(s),
        :param list CMDS: either a list of commands or a single command as a string.
        :return: nothing but debugLevel is filled
        """
        # NOTE(review): mutable default argument; harmless here because CMDS
        # is never mutated, but an empty tuple would be safer.
        if isinstance(CMDS, basestring): # a single command
            rlt = self.__runOnce(CMDS, use_try=use_try)
        else:
            rlt = self.__runOnce('; '.join(CMDS), use_try=use_try)
        return rlt

    def __getitem__(self, obj, use_try=None, use_dict=None): # to model a dict: "r['XXX']"
        """Get the value of an R variable or expression.
        :return: a Python object.
        :param obj: a string - the name of an R variable, or an R expression
        :param use_try: use "try" function to wrap the R expression. This can avoid R
            crashing if the obj does not exist in R.
        :param use_dict: named list will be returned a dict if use_dict is True,
            otherwise it will be a list of tuples (name, value)
        """
        if obj.startswith('_'):
            raise RError('Leading underscore ("_") is not permitted in R variable names!')
        use_try = use_try or self._DEBUG_MODE
        if use_dict is None:
            use_dict = self.use_dict
        # .getRvalue4Python__ is defined by the r4python bootstrap code; it
        # serialises the R value as a Python expression string.
        cmd = '.getRvalue4Python__(%s, use_dict=%s)' % (obj, use_dict is None and 'NULL' or use_dict and 'TRUE' or 'FALSE')
        rlt = self.__call__(cmd, use_try=use_try)
        # Strip the echoed command (head) and trailing newline (tail) from the
        # raw R output before evaluating it.
        head = (use_try and 'try({%s})%s[1] ' or '%s%s[1] ') % (cmd, self.newline)
        # sometimes (e.g. after "library(fastICA)") the R on Windows uses '\n' instead of '\r\n'
        head = rlt.startswith(head) and len(head) or len(head) - 1
        tail = rlt.endswith(self.newline) and len(rlt) - len(self.newline) or len(rlt) - len(self.newline) + 1 # - len('"')
        try:
            rlt = eval(eval(rlt[head:tail])) # The inner eval remove quotes and recover escaped characters.
        except:
            try:
                # some hack for installed.packages()
                code = rlt[head:tail]
                code = code.replace('\\\\"', '"')
                #code = code.replace("\n", "")
                #print(code)
                rlt = eval(code) # replace \\" with "
            except Exception as e:
                print(e)
                raise RError(rlt)
        return(rlt)

    def __setitem__(self, obj, val): # to model a dict: "r['XXX'] = YYY"
        """ Assign a value (val) to an R variable (obj).
        :param obj: a string - the name of an R variable
        :param val: a python object - the value to be passed to an R object
        """
        if obj.startswith('_'):
            raise RError('Leading underscore ("_") is not permitted in R variable names!')
        # Str4R (from biokit.rtools.py2r) renders val as R source code.
        self.__call__('%s <- %s' % (obj, Str4R(val)))

    def __delitem__(self, obj): # to model a dict: "del r['XXX']"
        if obj.startswith('_'):
            raise RError('Leading underscore ("_") is not permitted in R variable names!')
        self.__call__('rm(%s)' % obj)

    def __del__(self): # to model "del r"
        # if we do not have those 2 lines, prog may not be in the dictionary
        # and we enter in an infinite recursion loop ....
        if 'prog' not in self.__dict__.keys():
            return
        if self.prog:
            try:
                # Ask R to quit cleanly; ignore errors if the pipe is gone.
                self.sendAll(self.prog, 'q("no")'+self.newline)
            except:
                pass
            self.prog = None

    def __getattr__(self, obj, use_dict=None): # to model object attribute: "r.XXX"
        """
        :param obj: a string - the name of an R variable
        :param use_dict: named list will be returned a dict if use_dict is True,
            otherwise it will be a list of tuples (name, value)
        """
        # Overriding __getattr__ is safer than __getattribute__ since it is
        # only called as a last resort i.e. if there are no attributes in the
        # instance that match the name
        if obj in self.__dict__:
            return(self.__dict__[obj])
        if obj in self.__class__.__dict__:
            return(self.__class__.__dict__[obj])
        try:
            if use_dict is None:
                use_dict = self.use_dict
            rlt = self.__getitem__(obj, use_dict=use_dict)
        except:
            raise
        return(rlt)

    def __setattr__(self, obj, val): # to model object attribute: "r.XXX = YYY"
        # Real instance/class attributes are set locally; anything else is
        # forwarded to the R process as a variable assignment.
        if obj in self.__dict__ or obj in self.__class__.__dict__: # or obj.startswith('_'):
            self.__dict__[obj] = val # for old-style class
            #object.__setattr__(self, obj, val) # for new-style class
        else:
            self.__setitem__(obj, val)

    def __delattr__(self, obj): # to model object attribute: "del r.XXX"
        if obj in self.__dict__:
            del self.__dict__[obj]
        else:
            self.__delitem__(obj)

    def get(self, obj, default=None, use_dict=None): # to model a dict: "r.get('XXX', 'YYY')"
        """
        :param obj: a string - the name of an R variable, or an R expression
        :param default: a python object - the value to be returned if failed to get data from R
        :param use_dict: named list will be returned a dict if use_dict is True,
            otherwise it will be a list of tuples (name, value). If use_dict is
            None, the value of self.use_dict will be used instead.
        """
        try:
            rlt = self.__getitem__(obj, use_try=True, use_dict=use_dict)
        except:
            if True: # val is not None:
                rlt = default
            else:
                raise RError('No this object!')
        return(rlt)

    # Public aliases mirroring the dict protocol methods.
    run, assign, remove = __call__, __setitem__, __delitem__
| bsd-2-clause |
Achuth17/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
# Model-selection demo: fit Gaussian mixtures over a grid of covariance types
# and component counts, pick the model with the lowest BIC, then plot the BIC
# grid (top panel) and the winning model's clusters/ellipses (bottom panel).
print(__doc__)

import itertools

import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl

from sklearn import mixture

# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
# Two clusters: one sheared by C, one isotropic shifted to (-6, 3).
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# NOTE(review): np.infty was removed in NumPy 2.0 (use np.inf), and
# mixture.GMM is the pre-0.18 scikit-learn API (renamed GaussianMixture,
# covars_ -> covariances_) — fine for the sklearn version this example ships
# with, but not portable to modern releases.
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []

# Plot the BIC scores: one group of bars per covariance type, one bar per
# component count within the group.
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
# Place a '*' marker over the lowest-BIC bar (row/col recovered from argmin).
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Plot the winner: scatter each predicted cluster and overlay a 2-sigma-style
# ellipse derived from the eigendecomposition of its covariance.
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    if not np.any(Y_ == i):
        # skip empty components so they get no ellipse
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)

plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
crichardson17/emgtemp | Metals_Sims/z_u_sim_plots.py | 2 | 17136 | import matplotlib.pyplot as plt
import numpy as np
import urllib
import matplotlib.cm as cm
# --- Plot styling constants -------------------------------------------------
Low_Temp_Color = 'k'
Mid_Temp_Color = 'g'
High_Temp_Color = 'r'
#Temp_Color = 0.5
Cloudy_Sim_Color = 'cyan'
markersize = 40

# --- Observed SDSS emission-line fluxes -------------------------------------
# NOTE(review): hard-coded absolute path — only runs on the original machine.
SDSS_File = '/Users/compastro/jenkins/emgtemp/data/4363_gr_5_0_err_dered.csv'
SDSS_Data = np.genfromtxt(SDSS_File,skip_header=1, delimiter = ',',dtype=float,unpack=True,names=True)
NII_6584 = SDSS_Data['Flux_NII_6583']
Ha_6562 = SDSS_Data['Flux_Ha_6562']
OI_6300 = SDSS_Data['Flux_OI_6300']
OIII_5006 = SDSS_Data['Flux_OIII_5006']
Hb_4861 = SDSS_Data['Flux_Hb_4861']
OIII_4363 = SDSS_Data['Flux_OIII_4363']
SII_6716 = SDSS_Data['Flux_SII_6716']
SII_6731 = SDSS_Data['Flux_SII_6730']
# [OII] 3727 doublet is the sum of its two components.
OII_3727 = SDSS_Data['Flux_OII_3726'] + SDSS_Data['Flux_OII_3728']

# Log line ratios used on the diagnostic diagrams below.
OIII_Hb = np.log10(OIII_5006/Hb_4861)
NII_Ha = np.log10(NII_6584/Ha_6562)
Temp_Ratio = np.log10(OIII_5006/OIII_4363)
S_Ratio = np.log10(SII_6716/SII_6731)
NO_Ratio = np.log10(NII_6584/OII_3727)
OI_Ratio = np.log10(OI_6300/Ha_6562)
O_Ratio = np.log10(OIII_5006/OII_3727)
S_Ha_Ratio = np.log10((SII_6716+SII_6731)/Ha_6562)

# --- Cloudy photoionisation-model predictions (same ratios) -----------------
Cloudy_File = '/Users/compastro/jenkins/emgtemp/Metals_Sims/z_0.5_2.0_den_3.5_sims.pun'
Cloudy_Data = np.genfromtxt(Cloudy_File, delimiter = '\t',dtype=float,unpack=True,names=True)
Cloudy_NII_6584 = Cloudy_Data['N__2__6584A']
Cloudy_Ha_6562 = Cloudy_Data['H__1__6563A']
Cloudy_OIII_5006 = Cloudy_Data['O__3__5007A']
Cloudy_Hb_4861 = Cloudy_Data['TOTL__4861A']
Cloudy_OIII_4363 = Cloudy_Data['TOTL__4363A']
Cloudy_SII_6716 = Cloudy_Data['S_II__6716A']
Cloudy_SII_6731 = Cloudy_Data['S_II__6731A']
Cloudy_OII_3727 = Cloudy_Data['TOTL__3727A']
Cloudy_OI_6300 = Cloudy_Data['O__1__6300A']
Cloudy_OIII_Hb = np.log10(Cloudy_OIII_5006/Cloudy_Hb_4861)
Cloudy_NII_Ha = np.log10(Cloudy_NII_6584/Cloudy_Ha_6562)
Cloudy_Temp_Ratio = np.log10(Cloudy_OIII_5006/Cloudy_OIII_4363)
Cloudy_S_Ratio = np.log10(Cloudy_SII_6716/Cloudy_SII_6731)
Cloudy_NO_Ratio = np.log10(Cloudy_NII_6584/Cloudy_OII_3727)
Cloudy_OI_Ratio = np.log10(Cloudy_OI_6300/Cloudy_Ha_6562)
Cloudy_O_Ratio = np.log10(Cloudy_OIII_5006/Cloudy_OII_3727)
Cloudy_S_Ha_Ratio = np.log10((Cloudy_SII_6716+Cloudy_SII_6731)/Cloudy_Ha_6562)

# Grid parameters of the simulation run (metallicity / density columns).
Grid_File = '/Users/compastro/jenkins/emgtemp/Metals_Sims/z_0.6_2.0_sims.grd'
Grid_Data = np.genfromtxt(Grid_File,skip_header=1,delimiter = '\t',dtype=float,unpack=True)
Cloudy_Metals = Grid_Data[8,:]
Cloudy_Den = Grid_Data[6,:]

# Reshape the flat simulation vectors into a (6 metallicities x N ionization
# parameters) grid so each row/column can be drawn as one model track.
Cloudy_NII_Ha_array = np.reshape(Cloudy_NII_Ha,(6,-1))
Cloudy_OI_Ratio_array = np.reshape(Cloudy_OI_Ratio,(6,-1))
Cloudy_OIII_Hb_array = np.reshape(Cloudy_OIII_Hb,(6,-1))
Cloudy_Temp_Ratio_array = np.reshape(Cloudy_Temp_Ratio,(6,-1))
Cloudy_S_Ratio_array = np.reshape(Cloudy_S_Ratio,(6,-1))
Cloudy_NO_Ratio_array = np.reshape(Cloudy_NO_Ratio,(6,-1))
Cloudy_O_Ratio_array = np.reshape(Cloudy_O_Ratio,(6,-1))
Cloudy_S_Ha_Ratio_array = np.reshape(Cloudy_S_Ha_Ratio,(6,-1))
Cloudy_NII_Ha_transpose = np.transpose(Cloudy_NII_Ha_array)
Cloudy_OI_Ratio_transpose = np.transpose(Cloudy_OI_Ratio_array)
Cloudy_OIII_Hb_transpose = np.transpose(Cloudy_OIII_Hb_array)
Cloudy_Temp_Ratio_transpose = np.transpose(Cloudy_Temp_Ratio_array)
Cloudy_S_Ratio_transpose = np.transpose(Cloudy_S_Ratio_array)
Cloudy_NO_Ratio_transpose = np.transpose(Cloudy_NO_Ratio_array)
Cloudy_O_Ratio_transpose = np.transpose(Cloudy_O_Ratio_array)
Cloudy_S_Ha_Ratio_transpose = np.transpose(Cloudy_S_Ha_Ratio_array)

#cold_data_colors = [plt.cm.Blues(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
#mid_data_colors = [plt.cm.Greens(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
#hot_data_colors = [plt.cm.Reds(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
# One color per model track: reds along ionization parameter, blues along
# metallicity.
u_colors = [plt.cm.Reds(i) for i in np.linspace(0.25,1,7)]
metal_colors = [plt.cm.Blues(i) for i in np.linspace(0.25,1,6)]
#This is bad^ 3 and 7 are the number of densities and ionization parameters used, but ideally this wouldn't be hardcoded.
#sf_count = 0.0
#comp_count = 0.0
#agn_count = 0.0
#liner_count = 0.0
#amb_count = 0.0
# NOTE(review): placeholder overwritten inside every plotting loop by
# getShape(); the list wrapper appears unused.
shape = ['v']
#####################################################################################################
def getShape(NII_Ha, OIII_Hb, S_Ha_Ratio, OI_Ratio):
    """Return the scatter-plot marker for one object's BPT-style class.

    Applies the standard demarcation curves on the [NII]/Ha, [SII]/Ha and
    [OI]/Ha diagnostic diagrams and returns:
    'x' star-forming, '+' composite, 'D' AGN, 's' LINER, '*' ambiguous.
    """
    # Star-forming: below all three star-formation demarcation curves.
    if (OIII_Hb < 0.61/(NII_Ha-0.05)+1.3
            and OIII_Hb < 0.72/(S_Ha_Ratio-0.32)+1.30
            and OIII_Hb < 0.73/(OI_Ratio+0.59)+1.33):
        return 'x'
    # Composite: between the two [NII]/Ha curves.
    if (0.61/(NII_Ha-0.05)+1.3 < OIII_Hb
            and 0.61/(NII_Ha-0.47)+1.19 > OIII_Hb):
        return '+'
    # AGN (Seyfert): above every demarcation and above both Seyfert/LINER
    # division lines.
    if (0.61/(NII_Ha-0.47)+1.19 < OIII_Hb
            and 0.72/(S_Ha_Ratio-0.32)+1.30 < OIII_Hb
            and 0.73/(OI_Ratio+0.59)+1.33 < OIII_Hb
            and (1.89*S_Ha_Ratio)+0.76 < OIII_Hb
            and (1.18*OI_Ratio)+1.30 < OIII_Hb):
        return 'D'
    # LINER: above the demarcations but below the Seyfert/LINER divisions.
    if (0.61/(NII_Ha-0.47)+1.19 < OIII_Hb
            and 0.72/(S_Ha_Ratio-0.32)+1.30 < OIII_Hb
            and OIII_Hb < (1.89*S_Ha_Ratio)+0.76
            and 0.73/(OI_Ratio+0.59)+1.33 < OIII_Hb
            and OIII_Hb < (1.18*OI_Ratio)+1.30):
        return 's'
    # Anything else is ambiguous.
    return '*'
#####################################################################################################
#####################################################################################################
def getColor(OIII_5006, OIII_4363):
    """Map one object's [OIII] 5007/4363 flux ratio to a grayscale color.

    The 5007/4363 ratio is the temperature diagnostic used throughout this
    script (larger ratio = cooler gas), binned into three gray levels.
    Returns a matplotlib RGBA tuple from the 'gray' colormap.

    Bugfix: the original used strict '<' and '>' on both sides of each bin,
    so a ratio of exactly 50 or 100 fell through to an "error" branch and
    silently returned 'k'.  The bins below are contiguous and cover every
    value.
    """
    ratio = OIII_5006/OIII_4363  # hoisted: computed once instead of per branch
    # NOTE(review): the BPT-panel legend labels these bins with gray levels
    # 0.75/0.5/0.25, but the first bin here uses 0.2 — looks inconsistent;
    # confirm which is intended.
    if ratio < 50:
        Temp_Color = plt.cm.gray(0.2)    # highest-temperature bin
    elif ratio < 100:
        Temp_Color = plt.cm.gray(0.5)    # intermediate bin
    else:
        Temp_Color = plt.cm.gray(0.75)   # lowest-temperature bin
    return Temp_Color
#####################################################################################################
# --- Figure 31: four diagnostic panels (BPT, temperature, density, Z) -------
fig = plt.figure(31)
fig.subplots_adjust(wspace=0.4,hspace=0.4)

# Panel 1: BPT diagram — data points colored by temperature bin, with Cloudy
# model tracks (solid red = along U, dashed blue = along Z) and the
# Kewley (solid) / Kauffmann (dashed) demarcation curves.
sp1 = plt.subplot(221)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    #print(Temp_Color)
    plt.scatter(NII_Ha[i],OIII_Hb[i],s = markersize, marker = shape, color = Temp_Color, edgecolor = 'none')
#print (Temp_Color)
#print(sf_count)
#print(comp_count)
#print(agn_count)
#print(liner_count)
#print(amb_count)
#print(red)
#print(green)
#print(black)
#print(counter)
plt.xlim(-2.5,0.5)
plt.ylim(-1,1.3)
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("BPT Diagram")
#plt.scatter(Cloudy_NII_Ha,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
# NOTE(review): Axes.set_color_cycle was deprecated/removed in newer
# matplotlib (use set_prop_cycle) — fine for the version this was written for.
sp1.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_OIII_Hb_array, lw = '2')
sp1.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_OIII_Hb_transpose, lw = '2',linestyle = '--')
plt.legend([plt.scatter([],[],color='0.75', s = markersize), plt.scatter([],[],color='0.5', s = markersize), plt.scatter([],[],color='0.25', s = markersize)], (r"T$_e$<1.17*10$^4$",r"1.17*10$^4$<T$_e$<1.54*10$^4$",r"T$_e$>1.54*10$^4$"),scatterpoints = 1, loc = 'lower left',fontsize =8)
x=np.linspace(-1.5,0.3,50)
y=((.61/(x-.47))+1.19)
plt.plot(x,y,color=Low_Temp_Color)
x3=np.linspace(-1,-0.2,50)
y3=((.61/(x3-.05)+1.3))
plt.plot(x3,y3,linestyle='--',color='k')
#counter=0

# Panel 2: [OIII] 5007/4363 temperature diagnostic vs [NII]/Ha.
sp2 = plt.subplot(222)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(NII_Ha[i],Temp_Ratio[i], s = markersize, marker = shape, color = Temp_Color, edgecolor = 'none')
#print(counter)
plt.ylabel(r"log([OIII] $\lambda$5007/4363)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("Temperature")
plt.ylim(0,3)
plt.xlim(-2.5,0.5)
#plt.scatter(Cloudy_NII_Ha,Cloudy_Temp_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp2.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_Temp_Ratio_array, lw = '2')
sp2.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_Temp_Ratio_transpose, lw = '2',linestyle = '--')
plt.legend([plt.scatter([],[],color='.75', s = markersize, marker = 'x', edgecolor = 'none'),plt.scatter([],[],color='0.75', s = markersize, marker = '+', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 'D', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 's', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = '*', edgecolor = 'none')], ("Star-Forming","Composite","AGN","LINER","Ambiguous"),scatterpoints = 1, loc = 'lower left',fontsize =8)

# Panel 3: [SII] 6717/6731 density diagnostic vs [NII]/Ha.
sp3 = plt.subplot(223)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(NII_Ha[i],S_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([SII] $\lambda$6717/6731)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.ylim(-1.0,1.0)
plt.xlim(-2.5,0.5)
plt.title("Density")
#plt.scatter(Cloudy_NII_Ha,Cloudy_S_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp3.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_S_Ratio_array, lw = '2')
sp3.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_S_Ratio_transpose, lw = '2',linestyle = '--')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)

# Panel 4: [NII]/[OII] metallicity diagnostic vs [NII]/Ha.
sp4 = plt.subplot(224)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(NII_Ha[i],NO_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([NII] $\lambda$6584/[OII] $\lambda$3727)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("Metallicity")
plt.xlim(-2.5,0.5)
#plt.scatter(Cloudy_NII_Ha,Cloudy_NO_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp4.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_NO_Ratio_array, lw = '2')
sp4.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_NO_Ratio_transpose, lw = '2',linestyle = '--')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
# NOTE(review): suptitle is set AFTER show(), so in blocking interactive use
# the title appears only once the first window is closed — probably intended
# to come before show(); confirm.
plt.show()
plt.suptitle('hden = 3.5, -3.5 < U < -0.5, 0.5 < Z < 2.0')
#plt.savefig("Metallicity Sim Plots.pdf")

# --- Figure 32: [OI]-based diagnostics and Groves diagram -------------------
fig2 = plt.figure(32)

# Panel 5: [OI]/Ha vs [NII]/Ha.
sp5 = plt.subplot(221)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(NII_Ha[i],OI_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OI] $\lambda$6300/H$\alpha$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("OI_6300")
plt.xlim(-2.5,0.5)
plt.ylim(-2.5,0)
#plt.scatter(Cloudy_NII_Ha,Cloudy_OI_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp5.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_OI_Ratio_array, lw = '2')
sp5.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_OI_Ratio_transpose, lw = '2',linestyle = '--')
plt.legend([plt.scatter([],[],color='.75', s = markersize, marker = 'x', edgecolor = 'none'),plt.scatter([],[],color='0.75', s = markersize, marker = '+', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 'D', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 's', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = '*', edgecolor = 'none')], ("Star-Forming","Composite","AGN","LINER","Ambiguous"),scatterpoints = 1, loc = 'lower left',fontsize =8)

# Panel 6: [OIII]/Hb vs [OI]/Ha, with the [OI] demarcation (black) and the
# Seyfert/LINER division (blue).
sp6 = plt.subplot(222)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(OI_Ratio[i],OIII_Hb[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([OI] $\lambda$6300/H$\alpha$)")
plt.title("OI_6300 vs. OIII_5007")
#plt.scatter(Cloudy_OI_Ratio,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp6.set_color_cycle(u_colors)
plt.plot(Cloudy_OI_Ratio_array,Cloudy_OIII_Hb_array, lw = '2')
sp6.set_color_cycle(metal_colors)
plt.plot(Cloudy_OI_Ratio_transpose,Cloudy_OIII_Hb_transpose, lw = '2',linestyle = '--')
x6 = np.linspace(-2.5,-0.6,50)
y6 = ((.73/(x6+0.59))+1.33)
plt.plot(x6,y6,color = 'k')
x7 = np.linspace(-1.125,0.25,50)
y7 = (1.18*x7) + 1.30
plt.plot(x7,y7, color = 'b')
plt.ylim(-1,1.5)
plt.xlim(-2.5,0.5)
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)

# Panel 7: Groves diagram — [OIII]/[OII] vs [OI]/Ha with its two division
# line segments.
sp7 = plt.subplot(223)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(OI_Ratio[i],O_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/[OII]$\lambda$3727)")
plt.xlabel(r"log ([OI] $\lambda$6300/H$\alpha$)")
plt.title("Groves Diagram")
#plt.scatter(Cloudy_OI_Ratio,Cloudy_O_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp7.set_color_cycle(u_colors)
plt.plot(Cloudy_OI_Ratio_array,Cloudy_O_Ratio_array, lw = '2')
sp7.set_color_cycle(metal_colors)
plt.plot(Cloudy_OI_Ratio_transpose,Cloudy_O_Ratio_transpose, lw = '2',linestyle = '--')
x1 = np.linspace(-2.0,-.25,50)
y1 = ((-1.701*x1)-2.163)
x2 = np.linspace(-1.05998,0,50)
y2 = x2 + 0.7
plt.plot(x2,y2, color = 'k')
plt.plot(x1,y1, color = 'k')
plt.xlim(-2.5,0)
plt.ylim(-1.5,1)
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)

# Panel 8: [OIII]/Hb vs [SII]/Ha.
sp8 = plt.subplot(224)
for i in range(0,len(SDSS_Data['z'])):
    shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
    Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
    plt.scatter(S_Ha_Ratio[i],OIII_Hb[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([SII]/H$\alpha$)")
plt.title("OIII_5007 vs. SII")
plt.ylim(-1,1.5)
x4 = np.linspace(-0.32,0.25,50)
y4 = ((1.89*x4)+0.76)
x5 = np.linspace(-1.5,0.25,50)
y5 = ((0.72/(x - 0.32))+1.3)
plt.plot(x5,y5,color = 'k')
plt.plot(x4,y4,color = 'b')
#plt.scatter(Cloudy_S_Ha_Ratio,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp8.set_color_cycle(u_colors)
plt.plot(Cloudy_S_Ha_Ratio_array,Cloudy_OIII_Hb_array, lw = '2')
sp8.set_color_cycle(metal_colors)
plt.plot(Cloudy_S_Ha_Ratio_transpose,Cloudy_OIII_Hb_transpose, lw = '2',linestyle = '--')
plt.suptitle('hden = 3.5, -3.5 < U < -0.5, 0.5 < Z < 2.0')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
#plt.savefig("Metallicity Sim Plots1.pdf")
plt.show() | mit |
jorge2703/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
    """Silhouette Coefficient: precomputed vs. metric input, sampling, sparse X."""
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    D = pairwise_distances(X, metric='euclidean')
    # Given that the actual labels are used, we can assume that S would be
    # positive.
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
    # Test without calculating D: passing raw X must match the precomputed path.
    silhouette_metric = silhouette_score(X, y, metric='euclidean')
    assert_almost_equal(silhouette, silhouette_metric)
    # Test with sampling; fixed random_state keeps both calls on the same subset.
    silhouette = silhouette_score(D, y, metric='precomputed',
                                  sample_size=int(X.shape[0] / 2),
                                  random_state=0)
    silhouette_metric = silhouette_score(X, y, metric='euclidean',
                                         sample_size=int(X.shape[0] / 2),
                                         random_state=0)
    assert(silhouette > 0)
    assert(silhouette_metric > 0)
    assert_almost_equal(silhouette_metric, silhouette)
    # Test with sparse X
    X_sparse = csr_matrix(X)
    D = pairwise_distances(X_sparse, metric='euclidean')
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
def test_no_nan():
    """Silhouette Coefficient stays finite when a cluster has one sample.

    Regression test for issue #960: a lone sample in cluster 0 used to make
    silhouette_score return nan.
    """
    labels = np.array([1, 0, 1, 1, 1])
    # Any distance matrix works here; only the label layout matters.
    distances = np.random.RandomState(0).rand(len(labels), len(labels))
    score = silhouette_score(distances, labels, metric='precomputed')
    assert_false(np.isnan(score))
def test_correct_labelsize():
    """silhouette_score must reject degenerate label counts.

    Valid numbers of labels are 2 .. n_samples - 1 (inclusive); both the
    n_labels == n_samples and the n_labels == 1 cases must raise ValueError.
    """
    dataset = datasets.load_iris()
    X = dataset.data
    # n_labels = n_samples
    y = np.arange(X.shape[0])
    # Raw strings: '\.' and '\(' are regex escapes, not string escapes.
    # Without the r prefix they are invalid escape sequences
    # (DeprecationWarning, SyntaxWarning in Python >= 3.12).
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
| bsd-3-clause |
rahuldhote/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating line of a fitted linear classifier.

    The boundary w . x + b = 0 is solved for the second coordinate and
    plotted between min_x and max_x (padded by 5 on each side).
    """
    coef = clf.coef_[0]
    slope = -coef[0] / coef[1]
    xs = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    ys = slope * xs - clf.intercept_[0] / coef[1]
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2D (PCA or CCA), fit one-vs-rest linear SVCs and plot
    the samples, class memberships and both decision boundaries in the
    given subplot slot (1..4)."""
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # Indices of samples carrying each label (a sample may carry both,
    # in which case it gets two concentric circles).
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    # Only the second panel carries axis labels and the legend.
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
# Dataset that may contain unlabeled samples (rows of all zeros in Y).
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
# Same generator, but every sample is guaranteed at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
0asa/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 2 | 21138 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    """ Test the pairwise_distances helper function: string vs. callable
    metrics, dense/sparse/tuple inputs, precomputed pass-through, errors. """
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    S = pairwise_distances(X, metric="euclidean")
    S2 = euclidean_distances(X)
    assert_array_almost_equal(S, S2)
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((2, 4))
    S = pairwise_distances(X, Y, metric="euclidean")
    S2 = euclidean_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with tuples as X and Y
    X_tuples = tuple([tuple([v for v in row]) for row in X])
    Y_tuples = tuple([tuple([v for v in row]) for row in Y])
    S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
    assert_array_almost_equal(S, S2)
    # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
    S = pairwise_distances(X, metric="cityblock")
    S2 = pairwise_distances(X, metric=cityblock)
    assert_equal(S.shape[0], S.shape[1])
    assert_equal(S.shape[0], X.shape[0])
    assert_array_almost_equal(S, S2)
    # The manhattan metric should be equivalent to cityblock.
    S = pairwise_distances(X, Y, metric="manhattan")
    S2 = pairwise_distances(X, Y, metric=cityblock)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Low-level function for manhattan can divide in blocks to avoid
    # using too much memory during the broadcasting
    S3 = manhattan_distances(X, Y, size_threshold=10)
    assert_array_almost_equal(S, S3)
    # Test cosine as a string metric versus cosine callable
    # "cosine" uses sklearn metric, cosine (function) is scipy.spatial
    S = pairwise_distances(X, Y, metric="cosine")
    S2 = pairwise_distances(X, Y, metric=cosine)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Tests that precomputed metric returns pointer to, and not copy of, X.
    S = np.dot(X, X.T)
    S2 = pairwise_distances(S, metric="precomputed")
    assert_true(S is S2)
    # Test with sparse X and Y,
    # currently only supported for Euclidean, L1 and cosine.
    X_sparse = csr_matrix(X)
    Y_sparse = csr_matrix(Y)
    S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
    S2 = euclidean_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
    S2 = cosine_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    # Mixed sparse formats (csc/bsr/coo) must all give the same answer.
    S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
    S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
    assert_array_almost_equal(S, S2)
    S2 = manhattan_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with scipy.spatial.distance metric, with a kwd
    kwds = {"p": 2.0}
    S = pairwise_distances(X, Y, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # same with Y = None
    kwds = {"p": 2.0}
    S = pairwise_distances(X, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # Test that scipy distance metrics throw an error if sparse matrix given
    assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
    assert_raises(TypeError, pairwise_distances, X, Y_sparse,
                  metric="minkowski")
    # Test that a value error is raised if the metric is unknown
    assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_parallel():
    """_parallel_pairwise with n_jobs=3 must match the serial result for
    both dense and CSR inputs."""
    rng = np.random.RandomState(0)
    for func in (np.array, csr_matrix):
        X = func(rng.random_sample((5, 4)))
        Y = func(rng.random_sample((3, 4)))
        # Y=None case: distances of X with itself.
        S = euclidean_distances(X)
        S2 = _parallel_pairwise(X, None, euclidean_distances, n_jobs=3)
        assert_array_almost_equal(S, S2)
        # Distinct X and Y.
        S = euclidean_distances(X, Y)
        S2 = _parallel_pairwise(X, Y, euclidean_distances, n_jobs=3)
        assert_array_almost_equal(S, S2)
def test_pairwise_kernels():
    """ Test the pairwise_kernels helper function against every registered
    kernel, for dense, tuple and sparse inputs, and with a callable metric. """
    def callable_rbf_kernel(x, y, **kwds):
        """ Callable version of pairwise.rbf_kernel. """
        K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
        return K
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
    test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
                    "additive_chi2"]
    for metric in test_metrics:
        function = PAIRWISE_KERNEL_FUNCTIONS[metric]
        # Test with Y=None
        K1 = pairwise_kernels(X, metric=metric)
        K2 = function(X)
        assert_array_almost_equal(K1, K2)
        # Test with Y=Y
        K1 = pairwise_kernels(X, Y=Y, metric=metric)
        K2 = function(X, Y=Y)
        assert_array_almost_equal(K1, K2)
        # Test with tuples as X and Y
        X_tuples = tuple([tuple([v for v in row]) for row in X])
        Y_tuples = tuple([tuple([v for v in row]) for row in Y])
        K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
        assert_array_almost_equal(K1, K2)
        # Test with sparse X and Y
        X_sparse = csr_matrix(X)
        Y_sparse = csr_matrix(Y)
        if metric in ["chi2", "additive_chi2"]:
            # these don't support sparse matrices yet
            assert_raises(ValueError, pairwise_kernels,
                          X_sparse, Y=Y_sparse, metric=metric)
            continue
        K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
        assert_array_almost_equal(K1, K2)
    # Test with a callable function, with given keywords.
    metric = callable_rbf_kernel
    kwds = {}
    kwds['gamma'] = 0.1
    K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=Y, **kwds)
    assert_array_almost_equal(K1, K2)
    # callable function, X=Y
    K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=X, **kwds)
    assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
    """filter_params=True silently drops unknown kwargs; without it they raise."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    expected = rbf_kernel(X, Y, gamma=0.1)
    params = {"gamma": 0.1, "blabla": ":)"}
    # The bogus "blabla" entry is filtered out before reaching rbf_kernel.
    actual = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
    assert_array_almost_equal(expected, actual)
    # Without filtering, the unexpected keyword must raise TypeError.
    assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    """ Test the paired_distances helper function against every registered
    paired metric, sparse inputs, a callable metric, and length mismatch. """
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((5, 4))
    for metric, func in iteritems(PAIRED_DISTANCES):
        S = paired_distances(X, Y, metric=metric)
        S2 = func(X, Y)
        assert_array_almost_equal(S, S2)
        S3 = func(csr_matrix(X), csr_matrix(Y))
        assert_array_almost_equal(S, S3)
        if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value (paired == diagonal of pairwise).
            distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
            distances = np.diag(distances)
            assert_array_almost_equal(distances, S)
    # Check the callable implementation
    S = paired_distances(X, Y, metric='manhattan')
    S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
    assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y
    # differ
    Y = rng.random_sample((3, 4))
    assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
    """ Check pairwise minimum distances computation for any metric:
    dense/sparse inputs, sklearn and scipy metrics, and the chunked path. """
    X = [[0], [1]]
    Y = [[-1], [2]]
    Xsp = dok_matrix(X)
    Ysp = csr_matrix(Y, dtype=np.float32)
    # euclidean metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
    D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # sparse matrix case
    Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
    assert_array_equal(Dsp, D)
    assert_array_equal(Esp, E)
    # We don't want np.matrix here
    assert_equal(type(Dsp), np.ndarray)
    assert_equal(type(Esp), np.ndarray)
    # Non-euclidean sklearn metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
    D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
    D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (callable)
    D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (string)
    D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Compare with naive implementation
    rng = np.random.RandomState(0)
    X = rng.randn(97, 149)
    Y = rng.randn(111, 149)
    dist = pairwise_distances(X, Y, metric="manhattan")
    dist_orig_ind = dist.argmin(axis=0)
    dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
    # batch_size=50 forces the chunked code path; results must agree.
    dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
        X, Y, axis=0, metric="manhattan", batch_size=50)
    np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
    np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
    """Pairwise Euclidean distances for dense and sparse inputs."""
    X = [[0]]
    Y = [[1], [2]]
    # Dense case: distances from the single point 0 to 1 and 2.
    assert_array_almost_equal(euclidean_distances(X, Y), [[1., 2.]])
    # Identical computation on CSR matrices.
    assert_array_almost_equal(
        euclidean_distances(csr_matrix(X), csr_matrix(Y)), [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
    """Element-wise (paired) Euclidean distances."""
    lefts = [[0], [0]]
    rights = [[1], [2]]
    result = paired_euclidean_distances(lefts, rights)
    assert_array_almost_equal(result, [1., 2.])
def test_paired_manhattan_distances():
    """Element-wise (paired) L1 distances."""
    lefts = [[0], [0]]
    rights = [[1], [2]]
    result = paired_manhattan_distances(lefts, rights)
    assert_array_almost_equal(result, [1., 2.])
def test_chi_square_kernel():
    """Exponential and additive chi^2 kernels: values, dtypes, input checks."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((10, 4))
    K_add = additive_chi2_kernel(X, Y)
    gamma = 0.1
    K = chi2_kernel(X, Y, gamma=gamma)
    # np.float64 replaces the np.float alias, which was deprecated in
    # NumPy 1.20 and removed in 1.24; the comparison result is unchanged.
    assert_equal(K.dtype, np.float64)
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            # Reference formula: chi2 = -sum((x - y)^2 / (x + y)),
            # K = exp(gamma * chi2).
            chi2 = -np.sum((x - y) ** 2 / (x + y))
            chi2_exp = np.exp(gamma * chi2)
            assert_almost_equal(K_add[i, j], chi2)
            assert_almost_equal(K[i, j], chi2_exp)
    # check diagonal is ones for data with itself
    K = chi2_kernel(Y)
    assert_array_equal(np.diag(K), 1)
    # check off-diagonal is < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
    # check that float32 is preserved
    X = rng.random_sample((5, 4)).astype(np.float32)
    Y = rng.random_sample((10, 4)).astype(np.float32)
    K = chi2_kernel(X, Y)
    assert_equal(K.dtype, np.float32)
    # check integer type gets converted,
    # check that zeros are handled
    X = rng.random_sample((10, 4)).astype(np.int32)
    K = chi2_kernel(X, X)
    assert_true(np.isfinite(K).all())
    assert_equal(K.dtype, np.float64)
    # check that kernel of similar things is greater than dissimilar ones
    X = [[.3, .7], [1., 0]]
    Y = [[0, 1], [.9, .1]]
    K = chi2_kernel(X, Y)
    assert_greater(K[0, 0], K[0, 1])
    assert_greater(K[1, 1], K[1, 0])
    # test negative input
    assert_raises(ValueError, chi2_kernel, [[0, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
    # different n_features in X and Y
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
    # sparse matrices are rejected by both kernels
    assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
    assert_raises(ValueError, additive_chi2_kernel,
                  csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
    """ Valid kernels should be symmetric"""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                   sigmoid_kernel, cosine_similarity):
        K = kernel(X, X)
        # K(X, X) must equal its transpose to 15 decimal places.
        assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
    """Each kernel must give identical results on dense and CSR inputs."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    X_sparse = csr_matrix(X)
    for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                   sigmoid_kernel, cosine_similarity):
        K = kernel(X, X)
        K2 = kernel(X_sparse, X_sparse)
        assert_array_almost_equal(K, K2)
def test_linear_kernel():
    """The diagonal of a linear kernel holds each row's squared L2 norm."""
    rng = np.random.RandomState(0)
    samples = rng.random_sample((5, 4))
    gram = linear_kernel(samples, samples)
    expected_diag = [linalg.norm(row) ** 2 for row in samples]
    # flat[::6] walks the main diagonal of the 5x5 Gram matrix.
    assert_array_almost_equal(gram.flat[::6], expected_diag)
def test_rbf_kernel():
    """The diagonal of an RBF kernel matrix is identically one."""
    rng = np.random.RandomState(0)
    samples = rng.random_sample((5, 4))
    gram = rbf_kernel(samples, samples)
    # flat[::6] walks the main diagonal of the 5x5 kernel matrix.
    assert_array_almost_equal(gram.flat[::6], np.ones(5))
def test_cosine_similarity():
    """ Test the cosine_similarity for dense and sparse inputs. """
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    for X_, Y_ in ((X, None), (X, Y),
                   (Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
        # has been previously normalized by L2-norm.
        K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
        X_ = normalize(X_)
        if Y_ is not None:
            Y_ = normalize(Y_)
        K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
        assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
    """ Ensure that pairwise array check works for dense matrices."""
    # Check that if XB is None, XB is returned as reference to XA
    XA = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_true(XA_checked is XB_checked)
    assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
    """ Ensure that if XA and XB are given correctly, they return as equal."""
    # Check that if XB is not None, it is returned equal.
    # Note that the second dimension of XB is the same as XA.
    XA = np.resize(np.arange(40), (5, 8))
    XB = np.resize(np.arange(32), (4, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
    # Paired arrays additionally require matching first dimensions, so
    # resize XB to the same shape before calling check_paired_arrays.
    XB = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_paired_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
    """ Ensure an error is raised if the dimensions are different. """
    XA = np.resize(np.arange(45), (5, 9))
    XB = np.resize(np.arange(32), (4, 8))
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
    # Paired arrays must also agree on the number of samples (first dim).
    XB = np.resize(np.arange(4 * 9), (4, 9))
    assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
    """ Ensure an error is raised on 1D input arrays (either side). """
    XA = np.arange(45)
    XB = np.resize(np.arange(32), (4, 8))
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
    XA = np.resize(np.arange(45), (5, 9))
    XB = np.arange(32)
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
    """ Ensures that checks return valid sparse matrices. """
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_sparse = csr_matrix(XA)
    XB = rng.random_sample((5, 4))
    XB_sparse = csr_matrix(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
    # compare their difference because testing csr matrices for
    # equality with '==' does not work as expected.
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XB_checked))
    assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
    # Passing the same sparse matrix twice must still return sparse,
    # numerically identical outputs.
    XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XA_2_checked))
    assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert an n-dimensional array into nested tuples."""
    if len(X.shape) > 1:
        # Recurse over the first axis so every sub-array becomes a tuple.
        return tuple(tuplify(sub) for sub in X)
    # 1-D base case: a flat tuple of the scalar entries.
    return tuple(entry for entry in X)
def test_check_tuple_input():
    """ Ensures that checks accept nested tuples and return equal arrays. """
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_tuples = tuplify(XA)
    XB = rng.random_sample((5, 4))
    XB_tuples = tuplify(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
    assert_array_equal(XA_tuples, XA_checked)
    assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
    """check_pairwise_arrays preserves float32; mixed dtypes upcast to float64."""
    XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_equal(XA_checked.dtype, np.float32)
    # both float32
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_equal(XA_checked.dtype, np.float32)
    assert_equal(XB_checked.dtype, np.float32)
    # mismatched A. np.float64 replaces the np.float alias, which was
    # deprecated in NumPy 1.20 and removed in 1.24; behavior is identical.
    XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float64),
                                                   XB)
    assert_equal(XA_checked.dtype, np.float64)
    assert_equal(XB_checked.dtype, np.float64)
    # mismatched B
    XA_checked, XB_checked = check_pairwise_arrays(XA,
                                                   XB.astype(np.float64))
    assert_equal(XA_checked.dtype, np.float64)
    assert_equal(XB_checked.dtype, np.float64)
| bsd-3-clause |
nmartensen/pandas | pandas/tests/io/test_stata.py | 1 | 57819 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
import datetime as dt
import os
import struct
import sys
import warnings
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pandas import compat
from pandas._libs.tslib import NaT
from pandas.compat import iterkeys
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
class TestStata(object):
    def setup_method(self, method):
        """Resolve on-disk paths to every Stata fixture file.

        Naming convention: ``dta<case>_<version>`` where <version> is the
        Stata .dta format version (111/113/114/115/117/118); the ``csv*``
        files hold the expected values the readers are compared against.
        """
        self.dirpath = tm.get_data_path()
        self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
        self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
        self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
        self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
        self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
        self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
        self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
        self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
        self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
        self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
        self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
        self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
        self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
        self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
        self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
        self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
        self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
        self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
        self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
        self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
        self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
        self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
        self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
        self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
        self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
        self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
        self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
        self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
        self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
        self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
        self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
        self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
        self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
        self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
        self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
        self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
        self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
        self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
        self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
        self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
        self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
    def read_dta(self, file):
        """Read a .dta fixture with the legacy defaults (convert_dates=True)."""
        # Legacy default reader configuration
        return read_stata(file, convert_dates=True)
    def read_csv(self, file):
        """Read a CSV fixture with date parsing enabled (expected values)."""
        return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
    def test_data_method(self):
        """StataReader.data() (deprecated) must match StataReader.read()."""
        # Minimal testing of legacy data method
        with StataReader(self.dta1_114) as rdr:
            # record=True swallows the deprecation warning data() emits.
            with warnings.catch_warnings(record=True) as w:  # noqa
                parsed_114_data = rdr.data()
        with StataReader(self.dta1_114) as rdr:
            parsed_114_read = rdr.read()
        tm.assert_frame_equal(parsed_114_data, parsed_114_read)
    def test_read_dta1(self):
        """Formats 114 and 117: all-missing columns come back as NaN floats."""
        parsed_114 = self.read_dta(self.dta1_114)
        parsed_117 = self.read_dta(self.dta1_117)
        # Pandas uses np.nan as missing value.
        # Thus, all columns will be of type float, regardless of their name.
        expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                             columns=['float_miss', 'double_miss', 'byte_miss',
                                      'int_miss', 'long_miss'])
        # this is an oddity as really the nan should be float64, but
        # the casting doesn't fail so need to match stata here
        expected['float_miss'] = expected['float_miss'].astype(np.float32)
        tm.assert_frame_equal(parsed_114, expected)
        tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
    """Stata datetime columns of every frequency parse to expected values.

    Covers tc (millisecond), td (day), tw (week), tm (month), tq (quarter),
    th (half year) and ty (year) formats across file versions 114/115/117.
    """
    if LooseVersion(sys.version) < '2.7':
        pytest.skip('datetime interp under 2.6 is faulty')
    expected = DataFrame.from_records(
        [
            (
                datetime(2006, 11, 19, 23, 13, 20),
                1479596223000,
                datetime(2010, 1, 20),
                datetime(2010, 1, 8),
                datetime(2010, 1, 1),
                datetime(1974, 7, 1),
                datetime(2010, 1, 1),
                datetime(2010, 1, 1)
            ),
            (
                datetime(1959, 12, 31, 20, 3, 20),
                -1479590,
                datetime(1953, 10, 2),
                datetime(1948, 6, 10),
                datetime(1955, 1, 1),
                datetime(1955, 7, 1),
                datetime(1955, 1, 1),
                # year 2: far outside the Timestamp range, stays a datetime
                datetime(2, 1, 1)
            ),
            (
                pd.NaT,
                pd.NaT,
                pd.NaT,
                pd.NaT,
                pd.NaT,
                pd.NaT,
                pd.NaT,
                pd.NaT,
            )
        ],
        columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
                 'monthly_date', 'quarterly_date', 'half_yearly_date',
                 'yearly_date']
    )
    expected['yearly_date'] = expected['yearly_date'].astype('O')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        parsed_114 = self.read_dta(self.dta2_114)
        parsed_115 = self.read_dta(self.dta2_115)
        parsed_117 = self.read_dta(self.dta2_117)
        # 113 is buggy due to limits of date format support in Stata
        # parsed_113 = self.read_dta(self.dta2_113)
        # Remove resource warnings
        w = [x for x in w if x.category is UserWarning]
        # should get warning for each call to read_dta
        assert len(w) == 3
    # buggy test because of the NaT comparison on certain platforms
    # Format 113 test fails since it does not support tc and tC formats
    # tm.assert_frame_equal(parsed_113, expected)
    tm.assert_frame_equal(parsed_114, expected,
                          check_datetimelike_compat=True)
    tm.assert_frame_equal(parsed_115, expected,
                          check_datetimelike_compat=True)
    tm.assert_frame_equal(parsed_117, expected,
                          check_datetimelike_compat=True)
def test_read_dta3(self):
    """dta3 parses to the same frame across formats 113/114/115/117."""
    versions = [self.dta3_113, self.dta3_114, self.dta3_115, self.dta3_117]
    frames = [self.read_dta(fname) for fname in versions]
    # match stata here: float32 everywhere except the two id columns
    baseline = self.read_csv(self.csv3)
    baseline = baseline.astype(np.float32)
    baseline['year'] = baseline['year'].astype(np.int16)
    baseline['quarter'] = baseline['quarter'].astype(np.int8)
    for frame in frames:
        tm.assert_frame_equal(frame, baseline)
def test_read_dta4(self):
    """Value-labelled columns read back as categoricals (113/114/115/117)."""
    parsed_113 = self.read_dta(self.dta4_113)
    parsed_114 = self.read_dta(self.dta4_114)
    parsed_115 = self.read_dta(self.dta4_115)
    parsed_117 = self.read_dta(self.dta4_117)
    # mixed str/int rows exercise incompletely-labelled columns
    expected = DataFrame.from_records(
        [
            ["one", "ten", "one", "one", "one"],
            ["two", "nine", "two", "two", "two"],
            ["three", "eight", "three", "three", "three"],
            ["four", "seven", 4, "four", "four"],
            ["five", "six", 5, np.nan, "five"],
            ["six", "five", 6, np.nan, "six"],
            ["seven", "four", 7, np.nan, "seven"],
            ["eight", "three", 8, np.nan, "eight"],
            ["nine", "two", 9, np.nan, "nine"],
            ["ten", "one", "ten", np.nan, "ten"]
        ],
        columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
                 'labeled_with_missings', 'float_labelled'])
    # these are all categoricals
    expected = pd.concat([expected[col].astype('category')
                          for col in expected], axis=1)
    # stata doesn't save .category metadata
    tm.assert_frame_equal(parsed_113, expected, check_categorical=False)
    tm.assert_frame_equal(parsed_114, expected, check_categorical=False)
    tm.assert_frame_equal(parsed_115, expected, check_categorical=False)
    tm.assert_frame_equal(parsed_117, expected, check_categorical=False)
# File containing strls
def test_read_dta12(self):
    """A format-117 file containing strls reads back the expected values."""
    result = self.read_dta(self.dta21_117)
    rows = [
        (1, "abc", "abcdefghi"),
        (3, "cba", "qwertywertyqwerty"),
        (93, "", "strl"),
    ]
    expected = DataFrame.from_records(rows, columns=['x', 'y', 'z'])
    tm.assert_frame_equal(result, expected, check_dtype=False)
def test_read_dta18(self):
    """Format 118 (Stata 14, UTF-8) data, variable labels and data label."""
    parsed_118 = self.read_dta(self.dta22_118)
    parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
    expected = DataFrame.from_records(
        [['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
         ['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
         ['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
         ['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
         ['', '', '', 0, 0.3332999, 'option a', 1 / 3.]
         ],
        columns=['Things', 'Cities', 'Unicode_Cities_Strl',
                 'Ints', 'Floats', 'Bytes', 'Longs'])
    expected["Floats"] = expected["Floats"].astype(np.float32)
    # column-wise almost-equal: float32 storage loses precision
    for col in parsed_118.columns:
        tm.assert_almost_equal(parsed_118[col], expected[col])
    with StataReader(self.dta22_118) as rdr:
        vl = rdr.variable_labels()
        vl_expected = {u'Unicode_Cities_Strl':
                       u'Here are some strls with Ünicode chars',
                       u'Longs': u'long data',
                       u'Things': u'Here are some things',
                       u'Bytes': u'byte data',
                       u'Ints': u'int data',
                       u'Cities': u'Here are some cities',
                       u'Floats': u'float data'}
        tm.assert_dict_equal(vl, vl_expected)
        assert rdr.data_label == u'This is a Ünicode data label'
def test_read_write_dta5(self):
    """A row of all-missing values round-trips through to_stata/read_dta."""
    missing_cols = ['float_miss', 'double_miss', 'byte_miss',
                    'int_miss', 'long_miss']
    original = DataFrame([(np.nan,) * 5], columns=missing_cols)
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path, None)
        reread = self.read_dta(path)
        tm.assert_frame_equal(reread.set_index('index'), original)
def test_write_dta6(self):
    """An int32-typed csv fixture round-trips through to_stata."""
    original = self.read_csv(self.csv3)
    original.index.name = 'index'
    original.index = original.index.astype(np.int32)
    original['year'] = original['year'].astype(np.int32)
    original['quarter'] = original['quarter'].astype(np.int32)
    with tm.ensure_clean() as path:
        original.to_stata(path, None)
        written_and_read_again = self.read_dta(path)
        # index dtype changes on the round trip, hence check_index_type=False
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              original, check_index_type=False)
def test_read_write_dta10(self):
    """Mixed string/object/int/float/datetime frame round-trips."""
    original = DataFrame(data=[["string", "object", 1, 1.1,
                                np.datetime64('2003-12-25')]],
                         columns=['string', 'object', 'integer',
                                  'floating', 'datetime'])
    original["object"] = Series(original["object"], dtype=object)
    original.index.name = 'index'
    original.index = original.index.astype(np.int32)
    original['integer'] = original['integer'].astype(np.int32)
    with tm.ensure_clean() as path:
        # write the datetime column in Stata 'tc' (millisecond) format
        original.to_stata(path, {'datetime': 'tc'})
        written_and_read_again = self.read_dta(path)
        # original.index is np.int32, readed index is np.int64
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              original, check_index_type=False)
def test_stata_doc_examples(self):
    """Smoke test: the documentation example of writing a frame succeeds."""
    frame = DataFrame(np.random.randn(10, 2), columns=list('AB'))
    with tm.ensure_clean() as path:
        frame.to_stata(path)
def test_write_preserves_original(self):
    """to_stata must not mutate the frame it is given (GH 9795)."""
    # 9795
    np.random.seed(423)
    frame = pd.DataFrame(np.random.randn(5, 4), columns=list('abcd'))
    frame.loc[2, 'a':'c'] = np.nan
    snapshot = frame.copy()
    with tm.ensure_clean() as path:
        frame.to_stata(path, write_index=False)
    tm.assert_frame_equal(frame, snapshot)
def test_encoding(self):
    """latin-1 encoded files decode correctly and re-encode on write."""
    # GH 4626, proper encoding handling
    raw = read_stata(self.dta_encoding)
    encoded = read_stata(self.dta_encoding, encoding="latin-1")
    result = encoded.kreis1849[0]
    if compat.PY3:
        # on py3 both reads decode to str
        expected = raw.kreis1849[0]
        assert result == expected
        assert isinstance(result, compat.string_types)
    else:
        # on py2 the raw read keeps bytes; decode for comparison
        expected = raw.kreis1849.str.decode("latin-1")[0]
        assert result == expected
        assert isinstance(result, unicode)  # noqa
    with tm.ensure_clean() as path:
        encoded.to_stata(path, encoding='latin-1', write_index=False)
        reread_encoded = read_stata(path, encoding='latin-1')
        tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
    """Invalid column names are sanitized on write (with a warning)."""
    original = DataFrame([(1, 2, 3, 4)],
                         columns=['good', compat.u('b\u00E4d'), '8number',
                                  'astringwithmorethan32characters______'])
    # expected names after sanitization: non-ascii replaced, leading
    # digit prefixed with '_', names truncated to 32 characters
    formatted = DataFrame([(1, 2, 3, 4)],
                          columns=['good', 'b_d', '_8number',
                                   'astringwithmorethan32characters_'])
    formatted.index.name = 'index'
    formatted = formatted.astype(np.int32)
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            original.to_stata(path, None)
            # should get a warning for that format.
            assert len(w) == 1
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(
            written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
    """Duplicate-after-truncation and reserved names are made unique."""
    original = DataFrame([(1, 2, 3, 4, 5, 6)],
                         columns=['astringwithmorethan32characters_1',
                                  'astringwithmorethan32characters_2',
                                  '+',
                                  '-',
                                  'short',
                                  'delete'])
    # second long name collides after truncation and gets a '_0' prefix;
    # 'short'/'delete' are Stata reserved words and get a leading '_'
    formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
                          columns=['astringwithmorethan32characters_',
                                   '_0astringwithmorethan32character',
                                   '_',
                                   '_1_',
                                   '_short',
                                   '_delete'])
    formatted.index.name = 'index'
    formatted = formatted.astype(np.int32)
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            original.to_stata(path, None)
            # should get a warning for that format.
            assert len(w) == 1
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(
            written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
    """int16/int32/int64 round trip; int64 is upcast to float64.

    Stata has no 64-bit integer type, so the writer stores the int64
    column as a double and the frame read back is expected to carry
    ``int64`` as float64.
    """
    s1 = Series(2 ** 9, dtype=np.int16)
    s2 = Series(2 ** 17, dtype=np.int32)
    s3 = Series(2 ** 33, dtype=np.int64)
    original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
    original.index.name = 'index'
    # BUG FIX: ``formatted = original`` aliased the two frames, so the
    # astype below also converted ``original`` and the writer never saw
    # actual int64 data -- the comparison passed trivially.  Use a copy
    # so the written frame keeps its int64 column.
    formatted = original.copy()
    formatted['int64'] = formatted['int64'].astype(np.float64)
    with tm.ensure_clean() as path:
        original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              formatted)
def test_read_write_reread_dta14(self):
    """dta14 parses identically across versions and round-trips with 'td'."""
    expected = self.read_csv(self.csv14)
    cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
    for col in cols:
        expected[col] = expected[col]._convert(datetime=True, numeric=True)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['date_td'] = pd.to_datetime(
        expected['date_td'], errors='coerce')
    parsed_113 = self.read_dta(self.dta14_113)
    parsed_113.index.name = 'index'
    parsed_114 = self.read_dta(self.dta14_114)
    parsed_114.index.name = 'index'
    parsed_115 = self.read_dta(self.dta14_115)
    parsed_115.index.name = 'index'
    parsed_117 = self.read_dta(self.dta14_117)
    parsed_117.index.name = 'index'
    # all format versions must agree with each other
    tm.assert_frame_equal(parsed_114, parsed_113)
    tm.assert_frame_equal(parsed_114, parsed_115)
    tm.assert_frame_equal(parsed_114, parsed_117)
    with tm.ensure_clean() as path:
        # write dates back using Stata day ('td') format
        parsed_114.to_stata(path, {'date_td': 'td'})
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(
            written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
    """dta15 matches the typed csv fixture in every format version."""
    expected = self.read_csv(self.csv15)
    expected['byte_'] = expected['byte_'].astype(np.int8)
    expected['int_'] = expected['int_'].astype(np.int16)
    expected['long_'] = expected['long_'].astype(np.int32)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['double_'] = expected['double_'].astype(np.float64)
    expected['date_td'] = expected['date_td'].apply(
        datetime.strptime, args=('%Y-%m-%d',))
    parsed_113 = self.read_dta(self.dta15_113)
    parsed_114 = self.read_dta(self.dta15_114)
    parsed_115 = self.read_dta(self.dta15_115)
    parsed_117 = self.read_dta(self.dta15_117)
    tm.assert_frame_equal(expected, parsed_114)
    tm.assert_frame_equal(parsed_113, parsed_114)
    tm.assert_frame_equal(parsed_114, parsed_115)
    tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
    """time_stamp and data_label passed to to_stata are stored in the file."""
    frame = DataFrame([(1,)], columns=['variable'])
    stamp = datetime(2000, 2, 29, 14, 21)
    label = 'This is a data file.'
    with tm.ensure_clean() as path:
        frame.to_stata(path, time_stamp=stamp, data_label=label)
        with StataReader(path) as reader:
            assert reader.time_stamp == '29 Feb 2000 14:21'
            assert reader.data_label == label
def test_numeric_column_names(self):
    """Numeric column names are mangled to '_<n>' on write and recoverable."""
    original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        # should get a warning for the invalid (numeric) column names.
        with tm.assert_produces_warning(InvalidColumnName):
            original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        written_and_read_again = written_and_read_again.set_index('index')
        columns = list(written_and_read_again.columns)
        # columns come back as '_0', '_1', ...; strip the '_' prefix and
        # convert back to int.  A comprehension replaces the old
        # ``lambda``-assigned converter (PEP 8 / E731) and ``int(x[1:])``
        # also handles multi-digit column numbers, unlike ``int(x[1])``.
        written_and_read_again.columns = [int(name[1:]) for name in columns]
        tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
    """NaNs in float32/float64 columns survive the Stata round trip."""
    f32 = Series(np.arange(4.0), dtype=np.float32)
    f64 = Series(np.arange(4.0), dtype=np.float64)
    f32[::2] = np.nan
    f64[1::2] = np.nan
    original = DataFrame({'s1': f32, 's2': f64})
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = self.read_dta(path).set_index('index')
        tm.assert_frame_equal(reread, original)
def test_no_index(self):
    """write_index=False keeps the index out of the written file."""
    col_names = ['x', 'y']
    original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
                         columns=col_names)
    original.index.name = 'index_not_written'
    with tm.ensure_clean() as path:
        original.to_stata(path, write_index=False)
        reread = self.read_dta(path)
        pytest.raises(
            KeyError, lambda: reread['index_not_written'])
def test_string_no_dates(self):
    """Variable-width string and float columns round-trip unchanged."""
    s1 = Series(['a', 'A longer string'])
    s2 = Series([1.0, 2.0], dtype=np.float64)
    original = DataFrame({'s1': s1, 's2': s2})
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              original)
def test_large_value_conversion(self):
    """Values near each type's max are upcast on write with a warning.

    Stata reserves the top of every integer range for missing-value
    codes, so near-max values must be promoted to the next wider type.
    """
    s0 = Series([1, 99], dtype=np.int8)
    s1 = Series([1, 127], dtype=np.int8)
    s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
    s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
    original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        with tm.assert_produces_warning(PossiblePrecisionLoss):
            original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        # expected promotions: int8->int16, int16->int32, int64->float64
        modified = original.copy()
        modified['s1'] = Series(modified['s1'], dtype=np.int16)
        modified['s2'] = Series(modified['s2'], dtype=np.int32)
        modified['s3'] = Series(modified['s3'], dtype=np.float64)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              modified)
def test_dates_invalid_column(self):
    """A numeric column name in convert_dates is sanitized with a warning."""
    original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        with tm.assert_produces_warning(InvalidColumnName):
            original.to_stata(path, {0: 'tc'})
        written_and_read_again = self.read_dta(path)
        # column 0 comes back renamed to '_0'
        modified = original.copy()
        modified.columns = ['_0']
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              modified)
def test_105(self):
    """A legacy Stata 105-format file (World Bank survey data) parses."""
    # Data obtained from:
    # http://go.worldbank.org/ZXY29PVJ21
    dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
    df = pd.read_stata(dpath)
    df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
    df0 = pd.DataFrame(df0)
    df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
    df0['clustnum'] = df0["clustnum"].astype(np.int16)
    df0['pri_schl'] = df0["pri_schl"].astype(np.int8)
    df0['psch_num'] = df0["psch_num"].astype(np.int8)
    df0['psch_dis'] = df0["psch_dis"].astype(np.float32)
    tm.assert_frame_equal(df.head(3), df0)
def test_date_export_formats(self):
    """Every Stata date format truncates a datetime to its period start."""
    columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
    # each column is written in the format its name spells
    # (dict comprehension instead of dict() over a generator -- C402)
    conversions = {c: c for c in columns}
    data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
    original = DataFrame([data], columns=columns)
    original.index.name = 'index'
    expected_values = [datetime(2006, 11, 20, 23, 13, 20),  # Time
                       datetime(2006, 11, 20),  # Day
                       datetime(2006, 11, 19),  # Week
                       datetime(2006, 11, 1),  # Month
                       datetime(2006, 10, 1),  # Quarter year
                       datetime(2006, 7, 1),  # Half year
                       datetime(2006, 1, 1)]  # Year
    expected = DataFrame([expected_values], columns=columns)
    expected.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path, conversions)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              expected)
def test_write_missing_strings(self):
    """None in a string column is written as the empty string."""
    original = DataFrame([["1"], [None]], columns=["foo"])
    expected = DataFrame([["1"], [""]], columns=["foo"])
    expected.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = self.read_dta(path)
        tm.assert_frame_equal(reread.set_index('index'), expected)
def test_bool_uint(self):
    """bool and unsigned columns are written as the smallest signed type
    (or float64 for uint32 values exceeding int32 range)."""
    # ``np.bool`` was a deprecated alias of the builtin ``bool`` and is
    # removed in NumPy >= 1.24; use the builtin directly.
    s0 = Series([0, 1, True], dtype=bool)
    s1 = Series([0, 1, 100], dtype=np.uint8)
    s2 = Series([0, 1, 255], dtype=np.uint8)
    s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
    s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
    s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
    s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
    original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
                          's4': s4, 's5': s5, 's6': s6})
    original.index.name = 'index'
    expected = original.copy()
    # expected storage type for each column, by max representable value
    expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
                      np.int32, np.float64)
    for c, t in zip(expected.columns, expected_types):
        expected[c] = expected[c].astype(t)
    with tm.ensure_clean() as path:
        original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        written_and_read_again = written_and_read_again.set_index('index')
        tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
    """variable_labels() agrees between the 115 and 117 fixture files."""
    with StataReader(self.dta16_115) as rdr:
        sr_115 = rdr.variable_labels()
    with StataReader(self.dta16_117) as rdr:
        sr_117 = rdr.variable_labels()
    keys = ('var1', 'var2', 'var3')
    labels = ('label1', 'label2', 'label3')
    for k, v in compat.iteritems(sr_115):
        assert k in sr_117
        assert v == sr_117[k]
        assert k in keys
        assert v in labels
def test_minimal_size_col(self):
    """String columns are stored at the minimal width that fits the data.

    Columns are named 's<len>' so the declared Stata format/type width
    can be checked directly against the length encoded in the name.
    """
    str_lens = (1, 100, 244)
    s = {}
    for str_len in str_lens:
        s['s' + str(str_len)] = Series(['a' * str_len,
                                        'b' * str_len, 'c' * str_len])
    original = DataFrame(s)
    with tm.ensure_clean() as path:
        original.to_stata(path, write_index=False)
        with StataReader(path) as sr:
            typlist = sr.typlist
            variables = sr.varlist
            formats = sr.fmtlist
            for variable, fmt, typ in zip(variables, formats, typlist):
                # e.g. variable 's100' -> format '%100s' -> type 100
                assert int(variable[1:]) == int(fmt[1:-1])
                assert int(variable[1:]) == typ
def test_excessively_long_string(self):
    """Strings beyond Stata's 244-character limit raise ValueError."""
    str_lens = (1, 244, 500)
    s = {}
    for str_len in str_lens:
        s['s' + str(str_len)] = Series(['a' * str_len,
                                        'b' * str_len, 'c' * str_len])
    original = DataFrame(s)
    with pytest.raises(ValueError):
        with tm.ensure_clean() as path:
            original.to_stata(path)
def test_missing_value_generator(self):
    """StataMissingValue maps codes to '.', '.a' ... '.z' for all types."""
    # integer storage types: byte, half (int16), long (int32)
    types = ('b', 'h', 'l')
    df = DataFrame([[0.0]], columns=['float_'])
    with tm.ensure_clean() as path:
        df.to_stata(path)
        with StataReader(path) as rdr:
            valid_range = rdr.VALID_RANGE
    expected_values = ['.' + chr(97 + i) for i in range(26)]
    expected_values.insert(0, '.')
    for t in types:
        # missing codes start just above the type's max valid value
        offset = valid_range[t][1]
        for i in range(0, 27):
            val = StataMissingValue(offset + 1 + i)
            assert val.string == expected_values[i]
    # Test extremes for floats
    val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
    assert val.string == '.'
    val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
    assert val.string == '.z'
    # Test extremes for floats
    val = StataMissingValue(struct.unpack(
        '<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
    assert val.string == '.'
    val = StataMissingValue(struct.unpack(
        '<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
    assert val.string == '.z'
def test_missing_value_conversion(self):
    """convert_missing=True yields StataMissingValue objects, not NaN."""
    columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
    smv = StataMissingValue(101)
    keys = [key for key in iterkeys(smv.MISSING_VALUES)]
    keys.sort()
    data = []
    # 27 missing codes per type ('.', '.a' ... '.z'), 5 types -> 27x5 grid
    for i in range(27):
        row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
        data.append(row)
    expected = DataFrame(data, columns=columns)
    parsed_113 = read_stata(self.dta17_113, convert_missing=True)
    parsed_115 = read_stata(self.dta17_115, convert_missing=True)
    parsed_117 = read_stata(self.dta17_117, convert_missing=True)
    tm.assert_frame_equal(expected, parsed_113)
    tm.assert_frame_equal(expected, parsed_115)
    tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
    """Dates at/near the representable extremes parse and round-trip."""
    yr = [1960, 2000, 9999, 100, 2262, 1677]
    mo = [1, 1, 12, 1, 4, 9]
    dd = [1, 1, 31, 1, 22, 23]
    hr = [0, 0, 23, 0, 0, 0]
    mm = [0, 0, 59, 0, 0, 0]
    ss = [0, 0, 59, 0, 0, 0]
    expected = []
    for i in range(len(yr)):
        row = []
        for j in range(7):
            if j == 0:
                row.append(
                    datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
            elif j == 6:
                row.append(datetime(yr[i], 1, 1))
            else:
                row.append(datetime(yr[i], mo[i], dd[i]))
        expected.append(row)
    expected.append([NaT] * 7)
    columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
               'date_th', 'date_ty']
    # Fixes for weekly, quarterly,half,year
    expected[2][2] = datetime(9999, 12, 24)
    expected[2][3] = datetime(9999, 12, 1)
    expected[2][4] = datetime(9999, 10, 1)
    expected[2][5] = datetime(9999, 7, 1)
    expected[4][2] = datetime(2262, 4, 16)
    expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
    expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
    expected[5][2] = expected[5][3] = expected[
        5][4] = datetime(1677, 10, 1)
    expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
    # ``np.object`` was a deprecated alias of the builtin ``object``
    # (removed in NumPy >= 1.24); use the builtin directly.
    expected = DataFrame(expected, columns=columns, dtype=object)
    parsed_115 = read_stata(self.dta18_115)
    parsed_117 = read_stata(self.dta18_117)
    tm.assert_frame_equal(expected, parsed_115,
                          check_datetimelike_compat=True)
    tm.assert_frame_equal(expected, parsed_117,
                          check_datetimelike_compat=True)
    # each column name ends with its Stata date format, e.g. 'date_tc'
    date_conversion = {c: c[-2:] for c in columns}
    with tm.ensure_clean() as path:
        expected.index.name = 'index'
        expected.to_stata(path, date_conversion)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              expected,
                              check_datetimelike_compat=True)
def test_dtype_conversion(self):
    """preserve_dtypes=False returns read_csv-style (wide) dtypes."""
    expected = self.read_csv(self.csv15)
    expected['byte_'] = expected['byte_'].astype(np.int8)
    expected['int_'] = expected['int_'].astype(np.int16)
    expected['long_'] = expected['long_'].astype(np.int32)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['double_'] = expected['double_'].astype(np.float64)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    # default: Stata storage dtypes are preserved
    no_conversion = read_stata(self.dta15_117,
                               convert_dates=True)
    tm.assert_frame_equal(expected, no_conversion)
    conversion = read_stata(self.dta15_117,
                            convert_dates=True,
                            preserve_dtypes=False)
    # read_csv types are the same
    expected = self.read_csv(self.csv15)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
    """The columns= argument selects, reorders, and validates columns."""
    expected = self.read_csv(self.csv15)
    expected['byte_'] = expected['byte_'].astype(np.int8)
    expected['int_'] = expected['int_'].astype(np.int16)
    expected['long_'] = expected['long_'].astype(np.int32)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['double_'] = expected['double_'].astype(np.float64)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    columns = ['byte_', 'int_', 'long_']
    expected = expected[columns]
    dropped = read_stata(self.dta15_117, convert_dates=True,
                         columns=columns)
    tm.assert_frame_equal(expected, dropped)
    # See PR 10757: requesting a different order returns that order
    columns = ['int_', 'long_', 'byte_']
    expected = expected[columns]
    reordered = read_stata(self.dta15_117, convert_dates=True,
                           columns=columns)
    tm.assert_frame_equal(expected, reordered)
    # duplicated column names are rejected
    with pytest.raises(ValueError):
        columns = ['byte_', 'byte_']
        read_stata(self.dta15_117, convert_dates=True, columns=columns)
    # unknown column names are rejected
    with pytest.raises(ValueError):
        columns = ['byte_', 'int_', 'long_', 'not_found']
        read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
    """Categorical columns round-trip; unlabelled values become strings."""
    original = DataFrame.from_records(
        [
            ["one", "ten", "one", "one", "one", 1],
            ["two", "nine", "two", "two", "two", 2],
            ["three", "eight", "three", "three", "three", 3],
            ["four", "seven", 4, "four", "four", 4],
            ["five", "six", 5, np.nan, "five", 5],
            ["six", "five", 6, np.nan, "six", 6],
            ["seven", "four", 7, np.nan, "seven", 7],
            ["eight", "three", 8, np.nan, "eight", 8],
            ["nine", "two", 9, np.nan, "nine", 9],
            ["ten", "one", "ten", np.nan, "ten", 10]
        ],
        columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
                 'labeled_with_missings', 'float_labelled', 'unlabeled'])
    expected = original.copy()
    # these are all categoricals
    original = pd.concat([original[col].astype('category')
                          for col in original], axis=1)
    # mixed-type categories are stringified by the writer
    expected['incompletely_labeled'] = expected[
        'incompletely_labeled'].apply(str)
    expected['unlabeled'] = expected['unlabeled'].apply(str)
    expected = pd.concat([expected[col].astype('category')
                          for col in expected], axis=1)
    expected.index.name = 'index'
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:  # noqa
            # Silence warnings
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)
            res = written_and_read_again.set_index('index')
            tm.assert_frame_equal(res, expected, check_categorical=False)
def test_categorical_warnings_and_errors(self):
    """Over-long category labels error; mixed-type labels only warn."""
    # Warning for non-string labels
    # Error for labels too long
    original = pd.DataFrame.from_records(
        [['a' * 10000],
         ['b' * 10000],
         ['c' * 10000],
         ['d' * 10000]],
        columns=['Too_long'])
    original = pd.concat([original[col].astype('category')
                          for col in original], axis=1)
    with tm.ensure_clean() as path:
        pytest.raises(ValueError, original.to_stata, path)
    original = pd.DataFrame.from_records(
        [['a'],
         ['b'],
         ['c'],
         ['d'],
         [1]],
        columns=['Too_long'])
    original = pd.concat([original[col].astype('category')
                          for col in original], axis=1)
    # NOTE(review): ``path`` is reused here after its ensure_clean
    # context has exited, so the file written below is presumably never
    # cleaned up -- confirm whether this should be wrapped in a fresh
    # ensure_clean block.
    with warnings.catch_warnings(record=True) as w:
        original.to_stata(path)
        # should get a warning for mixed content
        assert len(w) == 1
def test_categorical_with_stata_missing_values(self):
    """A categorical with 120 labels plus NaN survives a round trip."""
    labels = [['a' + str(i)] for i in range(120)]
    labels.append([np.nan])
    original = pd.DataFrame.from_records(labels, columns=['many_labels'])
    original = pd.concat([original[name].astype('category')
                          for name in original], axis=1)
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = self.read_dta(path).set_index('index')
        tm.assert_frame_equal(reread, original, check_categorical=False)
def test_categorical_order(self):
    """Category order and underlying codes match the Stata value labels."""
    # Directly construct using expected codes
    # Format is is_cat, col_name, labels (in order), underlying data
    expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
                (True, 'reverse', ['a', 'b', 'c',
                                   'd', 'e'], np.arange(5)[::-1]),
                (True, 'noorder', ['a', 'b', 'c', 'd',
                                   'e'], np.array([2, 1, 4, 0, 3])),
                (True, 'floating', [
                    'a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
                (True, 'float_missing', [
                    'a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
                (False, 'nolabel', [
                    1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
                (True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'],
                 np.arange(5))]
    cols = []
    for is_cat, col, labels, codes in expected:
        if is_cat:
            cols.append((col, pd.Categorical.from_codes(codes, labels)))
        else:
            cols.append((col, pd.Series(labels, dtype=np.float32)))
    expected = DataFrame.from_items(cols)
    # Read with and with out categoricals, ensure order is identical
    parsed_115 = read_stata(self.dta19_115)
    parsed_117 = read_stata(self.dta19_117)
    tm.assert_frame_equal(expected, parsed_115, check_categorical=False)
    tm.assert_frame_equal(expected, parsed_117, check_categorical=False)
    # Check identity of codes
    for col in expected:
        if is_categorical_dtype(expected[col]):
            tm.assert_series_equal(expected[col].cat.codes,
                                   parsed_115[col].cat.codes)
            tm.assert_index_equal(expected[col].cat.categories,
                                  parsed_115[col].cat.categories)
def test_categorical_sorting(self):
    """Sorting a parsed categorical column orders by codes, not strings."""
    parsed_115 = read_stata(self.dta20_115)
    parsed_117 = read_stata(self.dta20_117)
    # Sort based on codes, not strings
    parsed_115 = parsed_115.sort_values("srh")
    parsed_117 = parsed_117.sort_values("srh")
    # Don't sort index
    parsed_115.index = np.arange(parsed_115.shape[0])
    parsed_117.index = np.arange(parsed_117.shape[0])
    # -1 codes are missing values (NaN)
    codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
    categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
    cat = pd.Categorical.from_codes(codes=codes, categories=categories)
    expected = pd.Series(cat, name='srh')
    tm.assert_series_equal(expected, parsed_115["srh"],
                           check_categorical=False)
    tm.assert_series_equal(expected, parsed_117["srh"],
                           check_categorical=False)
def test_categorical_ordering(self):
    """order_categoricals controls whether parsed categoricals are ordered."""
    ordered_115 = read_stata(self.dta19_115)
    ordered_117 = read_stata(self.dta19_117)
    unordered_115 = read_stata(self.dta19_115,
                               order_categoricals=False)
    unordered_117 = read_stata(self.dta19_117,
                               order_categoricals=False)
    for name in ordered_115:
        if not is_categorical_dtype(ordered_115[name]):
            continue
        assert ordered_115[name].cat.ordered
        assert ordered_117[name].cat.ordered
        assert not unordered_115[name].cat.ordered
        assert not unordered_117[name].cat.ordered
def test_read_chunks_117(self):
    """Chunked iteration over 117-format files matches a full read."""
    files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
                 self.dta4_117, self.dta14_117, self.dta15_117,
                 self.dta16_117, self.dta17_117, self.dta18_117,
                 self.dta19_117, self.dta20_117]
    for fname in files_117:
        # exercise all combinations of chunk size and conversion flags
        for chunksize in 1, 2:
            for convert_categoricals in False, True:
                for convert_dates in False, True:
                    with warnings.catch_warnings(record=True) as w:
                        warnings.simplefilter("always")
                        parsed = read_stata(
                            fname,
                            convert_categoricals=convert_categoricals,
                            convert_dates=convert_dates)
                    itr = read_stata(
                        fname, iterator=True,
                        convert_categoricals=convert_categoricals,
                        convert_dates=convert_dates)
                    pos = 0
                    for j in range(5):
                        with warnings.catch_warnings(record=True) as w:  # noqa
                            warnings.simplefilter("always")
                            try:
                                chunk = itr.read(chunksize)
                            except StopIteration:
                                break
                            from_frame = parsed.iloc[pos:pos + chunksize, :]
                            tm.assert_frame_equal(
                                from_frame, chunk, check_dtype=False,
                                check_datetimelike_compat=True,
                                check_categorical=False)
                        pos += chunksize
                    itr.close()
def test_iterator(self):
    """iterator=True / chunksize= produce the same frames as a full read."""
    fname = self.dta3_117
    parsed = read_stata(fname)
    with read_stata(fname, iterator=True) as itr:
        chunk = itr.read(5)
        tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
    with read_stata(fname, chunksize=5) as itr:
        chunk = list(itr)
        tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
    with read_stata(fname, iterator=True) as itr:
        chunk = itr.get_chunk(5)
        tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
    with read_stata(fname, chunksize=5) as itr:
        # get_chunk with no argument falls back to the chunksize
        chunk = itr.get_chunk()
        tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
    # GH12153: concatenating all chunks reproduces the full frame
    from_chunks = pd.concat(read_stata(fname, chunksize=4))
    tm.assert_frame_equal(parsed, from_chunks)
def test_read_chunks_115(self):
    """Chunked iteration over 115-format files matches a full read."""
    files_115 = [self.dta2_115, self.dta3_115, self.dta4_115,
                 self.dta14_115, self.dta15_115, self.dta16_115,
                 self.dta17_115, self.dta18_115, self.dta19_115,
                 self.dta20_115]
    for fname in files_115:
        for chunksize in 1, 2:
            for convert_categoricals in False, True:
                for convert_dates in False, True:
                    # Read the whole file
                    with warnings.catch_warnings(record=True) as w:
                        warnings.simplefilter("always")
                        parsed = read_stata(
                            fname,
                            convert_categoricals=convert_categoricals,
                            convert_dates=convert_dates)
                    # Compare to what we get when reading by chunk
                    itr = read_stata(
                        fname, iterator=True,
                        convert_dates=convert_dates,
                        convert_categoricals=convert_categoricals)
                    pos = 0
                    for j in range(5):
                        with warnings.catch_warnings(record=True) as w:  # noqa
                            warnings.simplefilter("always")
                            try:
                                chunk = itr.read(chunksize)
                            except StopIteration:
                                break
                            from_frame = parsed.iloc[pos:pos + chunksize, :]
                            tm.assert_frame_equal(
                                from_frame, chunk, check_dtype=False,
                                check_datetimelike_compat=True,
                                check_categorical=False)
                        pos += chunksize
                    itr.close()
def test_read_chunks_columns(self):
    """Chunked reads restricted to a column subset match a full read."""
    fname = self.dta3_117
    wanted = ['quarter', 'cpi', 'm1']
    step = 2
    full = read_stata(fname, columns=wanted)
    with read_stata(fname, iterator=True) as itr:
        offset = 0
        for _ in range(5):
            piece = itr.read(step, columns=wanted)
            if piece is None:
                break
            tm.assert_frame_equal(full.iloc[offset:offset + step, :],
                                  piece, check_dtype=False)
            offset += step
def test_write_variable_labels(self):
    """variable_labels= written by to_stata are readable, incl. the index."""
    # GH 13631, add support for writing variable labels
    original = pd.DataFrame({'a': [1, 2, 3, 4],
                             'b': [1.0, 3.0, 27.0, 81.0],
                             'c': ['Atlanta', 'Birmingham',
                                   'Cincinnati', 'Detroit']})
    original.index.name = 'index'
    variable_labels = {'a': 'City Rank', 'b': 'City Exponent', 'c': 'City'}
    with tm.ensure_clean() as path:
        original.to_stata(path, variable_labels=variable_labels)
        with StataReader(path) as sr:
            read_labels = sr.variable_labels()
        # unlabelled index column comes back with an empty label
        expected_labels = {'index': '',
                           'a': 'City Rank',
                           'b': 'City Exponent',
                           'c': 'City'}
        assert read_labels == expected_labels
    variable_labels['index'] = 'The Index'
    with tm.ensure_clean() as path:
        original.to_stata(path, variable_labels=variable_labels)
        with StataReader(path) as sr:
            read_labels = sr.variable_labels()
        assert read_labels == variable_labels
def test_write_variable_label_errors(self):
    """Invalid variable labels (non-latin-1 or too long) must raise."""
    df = pd.DataFrame({'a': [1, 2, 3, 4],
                       'b': [1.0, 3.0, 27.0, 81.0],
                       'c': ['Atlanta', 'Birmingham',
                             'Cincinnati', 'Detroit']})

    # Greek letters are outside latin-1 and so cannot be encoded.
    greek = u''.join([u'\u03A1', u'\u0391', u'\u039D',
                      u'\u0394', u'\u0391', u'\u03A3'])
    bad_encoding = {'a': 'City Rank',
                    'b': 'City Exponent',
                    'c': greek}
    with pytest.raises(ValueError):
        with tm.ensure_clean() as path:
            df.to_stata(path, variable_labels=bad_encoding)

    # Labels longer than Stata's 80-character limit are rejected too.
    too_long = {'a': 'City Rank',
                'b': 'City Exponent',
                'c': 'A very, very, very long variable label '
                     'that is too long for Stata which means '
                     'that it has more than 80 characters'}
    with pytest.raises(ValueError):
        with tm.ensure_clean() as path:
            df.to_stata(path, variable_labels=too_long)
def test_default_date_conversion(self):
    # GH 12259
    stamps = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
              dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
              dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
    frame = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
                          'strs': ['apple', 'banana', 'cherry'],
                          'dates': stamps})
    with tm.ensure_clean() as path:
        # Datetime columns are written as Stata 'tc' by default ...
        frame.to_stata(path, write_index=False)
        reread = read_stata(path, convert_dates=True)
        tm.assert_frame_equal(frame, reread)

        # ... so requesting 'tc' explicitly must give the same result.
        frame.to_stata(path,
                       write_index=False,
                       convert_dates={'dates': 'tc'})
        explicit = read_stata(path, convert_dates=True)
        tm.assert_frame_equal(reread, explicit)
def test_unsupported_type(self):
    """Complex columns have no Stata representation and must raise."""
    frame = pd.DataFrame({'a': [1 + 2j, 2 + 4j]})
    with pytest.raises(NotImplementedError):
        with tm.ensure_clean() as path:
            frame.to_stata(path)
def test_unsupported_datetype(self):
    """Unsupported date targets and tz-aware datetimes must raise."""
    stamps = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
              dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
              dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
    frame = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
                          'strs': ['apple', 'banana', 'cherry'],
                          'dates': stamps})
    # 'tC' (Stata's leap-second-aware clock format) is not supported.
    with pytest.raises(NotImplementedError):
        with tm.ensure_clean() as path:
            frame.to_stata(path, convert_dates={'dates': 'tC'})

    # Timezone-aware datetimes cannot be written at all.
    tz_dates = pd.date_range('1-1-1990', periods=3, tz='Asia/Hong_Kong')
    frame = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
                          'strs': ['apple', 'banana', 'cherry'],
                          'dates': tz_dates})
    with pytest.raises(NotImplementedError):
        with tm.ensure_clean() as path:
            frame.to_stata(path)
def test_repeated_column_labels(self):
    # GH 13923: categorical conversion fails when two values map to the
    # same label; the error message should name the offending label.
    with pytest.raises(ValueError) as cm:
        read_stata(self.dta23, convert_categoricals=True)
    # ``pytest.raises`` yields an ``ExceptionInfo`` object: the raised
    # exception lives in ``cm.value`` (there is no ``cm.exception``
    # attribute), and a substring check needs its string form.
    assert 'wolof' in str(cm.value)
def test_stata_111(self):
    # 111 is an old version but still used by current versions of
    # SAS when exporting to Stata format. We do not know of any
    # on-line documentation for this version.
    got = read_stata(self.dta24_111)
    expected = pd.DataFrame({'y': [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
                             'x': [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
                             'w': [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
                             'z': ['a', 'b', 'c', 'd', 'e', '', 'g', 'h',
                                   'i', 'j']})
    # Fix the column order before comparing.
    expected = expected[['y', 'x', 'w', 'z']]
    tm.assert_frame_equal(expected, got)
def test_out_of_range_double(self):
    # GH 14618: doubles above Stata's maximum cannot be stored and the
    # error should name the offending column.
    df = DataFrame({'ColumnOk': [0.0,
                                 np.finfo(np.double).eps,
                                 4.49423283715579e+307],
                    'ColumnTooBig': [0.0,
                                     np.finfo(np.double).eps,
                                     np.finfo(np.double).max]})
    with pytest.raises(ValueError) as cm:
        with tm.ensure_clean() as path:
            df.to_stata(path)
    # The raised exception is exposed as ``cm.value`` (ExceptionInfo has
    # no ``exception`` attribute); compare against its string form.
    assert 'ColumnTooBig' in str(cm.value)

    # Infinity is rejected as well, and the message should say so.
    df.loc[2, 'ColumnTooBig'] = np.inf
    with pytest.raises(ValueError) as cm:
        with tm.ensure_clean() as path:
            df.to_stata(path)
    assert 'ColumnTooBig' in str(cm.value)
    assert 'infinity' in str(cm.value)
def test_out_of_range_float(self):
    # float32 values above Stata's float maximum are upcast to double on
    # write instead of raising.
    original = DataFrame({'ColumnOk': [0.0,
                                       np.finfo(np.float32).eps,
                                       np.finfo(np.float32).max / 10.0],
                          'ColumnTooBig': [0.0,
                                           np.finfo(np.float32).eps,
                                           np.finfo(np.float32).max]})
    original.index.name = 'index'
    for col in original:
        original[col] = original[col].astype(np.float32)

    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = read_stata(path)
        # The oversized column should come back as float64.
        original['ColumnTooBig'] = original['ColumnTooBig'].astype(
            np.float64)
        tm.assert_frame_equal(original,
                              reread.set_index('index'))

    # Infinity, however, is always rejected.
    original.loc[2, 'ColumnTooBig'] = np.inf
    with pytest.raises(ValueError) as cm:
        with tm.ensure_clean() as path:
            original.to_stata(path)
    # ``cm.value`` holds the raised exception (there is no
    # ``cm.exception`` on pytest's ExceptionInfo); use its string form.
    assert 'ColumnTooBig' in str(cm.value)
    assert 'infinity' in str(cm.value)
def test_invalid_encoding(self):
    # GH15723, validate encoding
    frame = self.read_csv(self.csv3)
    with pytest.raises(ValueError):
        with tm.ensure_clean() as path:
            frame.to_stata(path, encoding='utf-8')
def test_path_pathlib(self):
    """to_stata/read_stata must round-trip through pathlib.Path objects."""
    df = tm.makeDataFrame()
    df.index.name = 'index'

    def reader(path):
        return read_stata(path).set_index('index')

    result = tm.round_trip_pathlib(df.to_stata, reader)
    tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
    """to_stata/read_stata must round-trip through py.path.local objects."""
    df = tm.makeDataFrame()
    df.index.name = 'index'

    def reader(path):
        return read_stata(path).set_index('index')

    result = tm.round_trip_localpath(df.to_stata, reader)
    tm.assert_frame_equal(df, result)
@pytest.mark.parametrize('write_index', [True, False])
def test_value_labels_iterator(self, write_index):
    # GH 16923: value_labels() must work on an iterator reader,
    # regardless of whether the index was written.
    df = pd.DataFrame(data={'A': ['B', 'E', 'C', 'A', 'E']})
    df['A'] = df['A'].astype('category')
    with tm.ensure_clean() as path:
        df.to_stata(path, write_index=write_index)
        reader = pd.read_stata(path, iterator=True)
        labels = reader.value_labels()
        assert labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}}
def test_set_index(self):
    # GH 17328: read_stata(index_col=...) restores the saved index.
    df = tm.makeDataFrame()
    df.index.name = 'index'
    with tm.ensure_clean() as path:
        df.to_stata(path)
        reread = pd.read_stata(path, index_col='index')
        tm.assert_frame_equal(df, reread)
| bsd-3-clause |
q1ang/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """A write-only stream that duplicates output onto two streams."""

    def __init__(self, file1, file2):
        # Keep both sinks under their historical attribute names.
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # Forward the payload to both sinks.
        for sink in (self.file1, self.file2):
            sink.write(data)

    def flush(self):
        for sink in (self.file1, self.file2):
            sink.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
    """Helper function to get data over http or from a local file"""
    if url.startswith('http://'):
        # Try Python 2, use Python 3 on exception
        try:
            resp = urllib.urlopen(url)
            encoding = resp.headers.dict.get('content-encoding', 'plain')
        except AttributeError:
            resp = urllib.request.urlopen(url)
            encoding = resp.headers.get('content-encoding', 'plain')
        data = resp.read()
        if encoding == 'plain':
            pass
        elif encoding == 'gzip':
            # ``data`` is a bytes payload, so it must be wrapped in
            # BytesIO; the original wrapped it in StringIO, which fails
            # under Python 3 (StringIO only accepts text).
            from io import BytesIO
            data = gzip.GzipFile(fileobj=BytesIO(data)).read()
        else:
            raise RuntimeError('unknown encoding')
    else:
        # ``with`` closes the handle on exit; the explicit close() the
        # original carried inside the with-block was redundant.
        with open(url, 'r') as fid:
            data = fid.read()
    return data
# Disk-backed memoization: repeated documentation builds reuse previously
# downloaded data (search indexes, HTML pages) instead of re-fetching it.
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index

    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)

    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # Scan forward, tracking nesting depth, until the matching
        # end_tag closes the block opened at start_pos.
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        sel = str_in[start_pos + 1:pos]
        return sel

    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            # The character after ':' decides the value's type.
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value

            # Advance past the trailing comma to the next key, if any.
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out

    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')

    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')

    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)

    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx

    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """

    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        self._link_cache = {}  # full object name -> resolved URL (or False)
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}  # page URL -> downloaded HTML
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)

        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False

        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)

        self._searchindex = dict(filenames=filenames, objects=objects)

    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        # Look the object up in the search index, first under its fully
        # qualified name, then under its (short) module name.
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]

        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)

            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')

            # Fetch the target page once; later lookups reuse the cache.
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html

            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    # Anchor directly to the object's entry on the page.
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False

        return link

    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found

        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobj['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).

        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link

        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')

            # for some reason, the relative link goes one directory too high up
            link = link[3:]

        return link
###############################################################################
# reST template for examples that produce no figures.
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""

# reST template for plotting examples: adds the image list, captured
# stdout and the measured run time.
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""

# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""

HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""

# Template used when an example produced exactly one figure.
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""

# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
                   'plot_outlier_detection_001.png': (3, 372),
                   'plot_gp_regression_001.png': (2, 250),
                   'plot_adaboost_twoclass_001.png': (1, 372),
                   'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any

    Returns the docstring, a one-paragraph summary of it, and the
    1-based row number at which the docstring ends.
    """
    # Read the source inside ``with`` blocks so the handles are closed;
    # the original leaked the file objects returned by open().
    if six.PY2:
        with open(filename) as f:
            lines = f.readlines()
    else:
        with open(filename, encoding='utf-8') as f:
            lines = f.readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        # Skip a shebang line, remembering the offset for row bookkeeping.
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
            break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.

    Walks the examples tree (one level deep) and writes one rst page per
    example plus an ``auto_examples/index.rst`` gallery index.
    """
    # Output directories, all under the Sphinx source tree.
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))
    # plot_gallery may arrive as a string ("True"/"False") or a bool.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)
    # we create an index.rst with all examples
    fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
    # Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, directory)):
            generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    """Return (docstring_end_row, last_row), both 1-based, for a file.

    Used to sort examples by the amount of actual code they contain.
    """
    example_file = os.path.join(target_dir, filename)
    # Read the source inside ``with`` blocks so the handles are closed;
    # the original leaked the file objects returned by open().
    if six.PY2:
        with open(example_file) as f:
            lines = f.readlines()
    else:
        with open(example_file, encoding='utf-8') as f:
            lines = f.readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        # Skip a shebang line, remembering the offset for row bookkeeping.
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            # The first STRING token is the module docstring.
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
def line_count_sort(file_list, target_dir):
    """Sort example files by code length.

    Returns the ``.py`` entries of *file_list* ordered by the number of
    lines following the docstring, ties broken alphabetically.
    """
    new_list = [x for x in file_list if x.endswith('.py')]
    if not new_list:
        # Bail out before sorting; the original only checked afterwards.
        return []
    # np.object/np.str/np.float are deprecated aliases of the builtins
    # (removed in NumPy 1.24); use the builtins directly.
    unsorted = np.zeros(shape=(len(new_list), 2), dtype=object)
    for count, exmpl in enumerate(new_list):
        docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
        unsorted[count][1] = total_lines - docstr_lines
        unsorted[count][0] = exmpl
    # Primary key: code line count; secondary key: file name.
    index = np.lexsort((unsorted[:, 0].astype(str),
                        unsorted[:, 1].astype(float)))
    return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
    """Generates RST to place a thumbnail in a gallery"""
    # Thumbnail image path plus the anchor names used by the :ref: link;
    # path separators are flattened to underscores to form valid anchors.
    thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
    link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
    if ref_name.startswith('._'):
        ref_name = ref_name[2:]
    out = []
    out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
    out.append('.. only:: html\n\n')
    out.append(' .. figure:: %s\n' % thumb)
    if link_name.startswith('._'):
        link_name = link_name[2:]
    # Examples in a sub-directory link into that directory's html pages.
    if full_dir != '.':
        out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
    else:
        out.append(' :target: ./%s.html\n\n' % link_name[:-3])
    out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
    if is_backref:
        # Non-html builders get a plain cross-reference instead.
        out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
    return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.

    Renders every example in *directory*, appends its thumbnail and
    toctree entry to *fhindex*, and writes "Examples using X" back-link
    snippets for the sklearn objects each example exercises.
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        target_dir = root_dir
        src_dir = example_dir
    # Each example directory must ship a README.txt used as its header.
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Shortest examples first (see line_count_sort).
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
            # For every sklearn object used, append this example's
            # thumbnail to the object's ".examples" include file.
            for backref in backrefs:
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                # First sighting truncates the file, later ones append.
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                        print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
                    seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""")  # clear at the end of the section
# Modules for which we embed documentation hyperlinks into the rendered
# example code (consumed by embed_code_links below).
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    # Pick the scale factor that makes the image fit inside the box.
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    scale = scale_w if height_in * scale_w <= height else scale_h

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # Shrink the image, then paste it centered on a white canvas of the
    # requested size.
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """ Get the shortest possible module name """
    parts = module_name.split('.')
    short_name = module_name
    # Drop trailing path components one at a time for as long as the
    # object can still be imported from the truncated module path.
    for i in range(len(parts) - 1, 0, -1):
        candidate = '.'.join(parts[:i])
        try:
            exec('from %s import %s' % (candidate, obj_name))
        except ImportError:
            # get the last working module name
            short_name = '.'.join(parts[:(i + 1)])
            break
        short_name = candidate
    return short_name
class NameFinder(ast.NodeVisitor):
    """Finds the longest form of variable names and their imports in code

    Only retains names whose root was brought in by an import statement.
    """

    def __init__(self):
        super(NameFinder, self).__init__()
        self.imported_names = {}    # local alias -> fully qualified name
        self.accessed_names = set()  # every (dotted) name used in the code

    def visit_Import(self, node, prefix=''):
        for alias in node.names:
            self.imported_names[alias.asname or alias.name] = \
                prefix + alias.name

    def visit_ImportFrom(self, node):
        # ``from a.b import c`` is recorded as an import of ``a.b.c``.
        self.visit_Import(node, node.module + '.')

    def visit_Name(self, node):
        self.accessed_names.add(node.id)

    def visit_Attribute(self, node):
        attrs = []
        inner = node
        while isinstance(inner, ast.Attribute):
            attrs.append(inner.attr)
            inner = inner.value
        if isinstance(inner, ast.Name):
            # This is a.b, not e.g. a().b
            attrs.append(inner.id)
            self.accessed_names.add('.'.join(reversed(attrs)))
        else:
            # need to get a in a().b
            self.visit(inner)

    def get_mapping(self):
        # Yield (name as written, fully qualified name) for every
        # accessed name whose root component was imported.
        for name in self.accessed_names:
            root = name.split('.', 1)[0]
            if root in self.imported_names:
                yield name, self.imported_names[root] + name[len(root):]
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names

    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))

    summary = {}
    for name, full_name in finder.get_mapping():
        # name is as written in file (e.g. np.asarray)
        # full_name includes resolved import path (e.g. numpy.asarray)
        module, attribute = full_name.rsplit('.', 1)
        summary[name] = {'name': attribute,
                         'module': module,
                         'module_short': get_short_module_name(module,
                                                               attribute)}
    return summary
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        # Previously captured stdout/run time are reused if present.
        # NOTE(review): these open() handles are never closed explicitly.
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                # Mirror the example's stdout to the console AND a buffer
                # so it can be embedded in the generated rst.
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't with to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                # (this requires the figures to be numbered
                # incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    # Only pass non-default face/edge colors to savefig.
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            # NOTE(review): bare except deliberately keeps the doc build
            # going when one example fails; the traceback is printed.
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            # Cached images are up to date: list them instead of re-running.
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                              '[0-9][0-9][0-9]'))]
            figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally the should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    # The rst templates pull their fields straight from local variables.
    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code.

    Sphinx 'build-finished' hook: walks the generated HTML gallery, and for
    every example that has a ``*_codeobj.pickle`` sidecar, wraps identifier
    spans in ``<a href=...>`` links resolved against sklearn / matplotlib /
    numpy / scipy documentation.
    """
    if exception is not None:
        # A build error already occurred; do not touch the output.
        return
    print('Embedding documentation hyperlinks in examples..')
    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return
    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)
    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    # External resolvers need network access; failures are reported but
    # non-fatal so the build still completes offline.
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)
    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))
    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'
    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            # NOTE(review): dirpath from os.walk is already absolute here, so
            # os.path.join discards the first argument -- harmless but redundant.
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            # fname[:-5] strips the '.html' suffix to recover the example stem.
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')
            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                # NOTE(review): redundant -- the with-block above already
                # closed the file.
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]
                    if this_module not in doc_resolvers:
                        continue
                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue
                    if link is not None:
                        # Rebuild the name exactly as pygments renders it so a
                        # literal string replacement matches the HTML.
                        parts = name.split('.')
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file
                # ensure greediness: longest names first so e.g. 'np.linalg'
                # wins over 'np'
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))

                def substitute_link(match):
                    return str_repl[match.group()]

                if len(str_repl) > 0:
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    """Sphinx extension entry point: register the gallery build hooks."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')
    # embed links after build is finished
    app.connect('build-finished', embed_code_links)
    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made.  If the desired image name already
    # exists, it appends a digit to prevent overwrites.  The problem is,
    # the directory is never cleared.  This means that each time you build
    # the docs, the number of images in the directory grows.
    #
    # This question has been asked on the sphinx development list, but there
    # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # The following is a hack that prevents this behavior by clearing the
    # image build directory each time the docs are built.  If sphinx
    # changes their layout between versions, this will not work (though
    # it should probably not cause a crash).  Tested successfully
    # on Sphinx 1.0.7
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        filelist = os.listdir(build_image_dir)
        for filename in filelist:
            if filename.endswith('png'):
                os.remove(os.path.join(build_image_dir, filename))
def setup_module():
    """No-op module fixture so nose does not run setup() above as a test hook."""
    # HACK: Stop nosetests running setup() above
    pass
| bsd-3-clause |
wadda/Bari | bariscroller.py | 1 | 8327 | # coding=utf-8
import sys
import matplotlib
import numpy as np
from PyQt4 import QtCore
from PyQt4 import QtGui
matplotlib.use("Qt4Agg")
from matplotlib.figure import Figure
from matplotlib.animation import TimedAnimation
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import time
import threading
import ms5637
__author__ = 'Moe'
__copyright__ = 'Copyright 2017 Moe'
__license__ = 'MIT'
__version__ = '0.0.2'
# pinched and modified from http://stackoverflow.com/questions/11874767/real-time-plotting-in-while-loop-with-matplotlib
# Bari sensor of MS5637
sensor = ms5637.Chip()
XLIMIT = 5400
YUPPER = 106000
YLOWER = 98000
SLEEP_FOR = .052 # Guesstimate for approx. 10 readings per second.
def setCustomSize(x, width, height):
    """Pin widget *x* to a fixed width x height (no stretching either way)."""
    policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
    policy.setHorizontalStretch(0)
    policy.setVerticalStretch(0)
    policy.setHeightForWidth(x.sizePolicy().hasHeightForWidth())
    x.setSizePolicy(policy)
    fixed = QtCore.QSize(width, height)
    x.setMinimumSize(fixed)
    x.setMaximumSize(fixed)
class CustomMainWindow(QtGui.QMainWindow):
    """Main window: a scrolling pressure plot plus zoom/span buttons.

    A daemon thread (dataSendLoop) feeds sensor readings back into the
    figure through the addData_callbackFunc slot.
    """

    def __init__(self):
        super(CustomMainWindow, self).__init__()
        # Define the geometry of the main window
        self.setGeometry(100, 50, 1000, 500)
        self.setWindowTitle("Drummoyne Wharf")
        # Central frame with a grid layout holding the buttons and the plot.
        self.FRAME_A = QtGui.QFrame(self)
        self.FRAME_A.setStyleSheet("QWidget { background-color: %s }" % QtGui.QColor(210, 210, 235, 255).name())
        self.LAYOUT_A = QtGui.QGridLayout()
        self.FRAME_A.setLayout(self.LAYOUT_A)
        self.setCentralWidget(self.FRAME_A)
        # NOTE(review): buttonbox is created but never attached to a layout.
        self.buttonbox = QtGui.QHBoxLayout()
        # Y-axis zoom-in button
        self.zoomInBtn = QtGui.QPushButton(text='In')
        setCustomSize(self.zoomInBtn, 60, 30)
        self.zoomInBtn.clicked.connect(self.zoomIn)
        self.LAYOUT_A.addWidget(self.zoomInBtn, *(1, 0))
        # Y-axis zoom-out button
        self.zoomOutBtn = QtGui.QPushButton(text='Out')
        setCustomSize(self.zoomOutBtn, 60, 30)
        self.zoomOutBtn.clicked.connect(self.zoomOut)
        self.LAYOUT_A.addWidget(self.zoomOutBtn, *(1, 1))
        # Show fewer samples (shrink x span)
        self.XfewerBtn = QtGui.QPushButton(text='Less')
        setCustomSize(self.XfewerBtn, 60, 30)
        self.XfewerBtn.clicked.connect(self.x_fewer)
        self.LAYOUT_A.addWidget(self.XfewerBtn, *(2, 0))
        # Show more samples (grow x span)
        self.XmoreBtn = QtGui.QPushButton(text='More')
        setCustomSize(self.XmoreBtn, 60, 30)
        self.XmoreBtn.clicked.connect(self.x_more)
        self.LAYOUT_A.addWidget(self.XmoreBtn, *(2, 1))
        # Place the matplotlib figure
        self.myFig = CustomFigCanvas()
        self.LAYOUT_A.addWidget(self.myFig, *(0, 1))
        # Daemon thread that polls the sensor and emits readings into
        # addData_callbackFunc via a Qt signal.
        myDataLoop = threading.Thread(name='myDataLoop', target=dataSendLoop, daemon=True,
                                      args=(self.addData_callbackFunc,))
        myDataLoop.start()
        self.show()

    def zoomIn(self):
        print("IN zoom")
        self.myFig.zoomIn(1000)

    def zoomOut(self):
        print("zoom OUT")
        self.myFig.zoomOut(1000)

    def x_fewer(self):
        # NOTE(review): debug message is copy-pasted from zoomIn and misleading.
        print("IN zoom")
        self.myFig.lessIn(500)

    def x_more(self):
        # NOTE(review): debug message is copy-pasted from zoomOut and misleading.
        print("zoom OUT")
        self.myFig.moreOut(500)

    def addData_callbackFunc(self, value):
        # Slot for Communicate.data_signal: queue one reading for plotting.
        self.myFig.addData(value)
class CustomFigCanvas(FigureCanvas, TimedAnimation):
    """Scrolling strip-chart canvas for barometer readings.

    Samples arrive through addData() (called from the GUI thread via a Qt
    signal) and are rolled into a fixed-length numpy buffer; TimedAnimation
    redraws the blue history line, a red recent-tail line and a star head
    marker every 50 ms.
    """

    def __init__(self):
        self.addedData = []
        # Count of animation-step failures, reported by _step().  Fix: the
        # original never initialized this attribute, so the first exception
        # inside _step() raised AttributeError instead of being counted.
        self.abc = 0
        print('Loading...', matplotlib.__version__)
        # Data buffer: x positions count down so the newest sample lands at
        # the right edge; y starts flat at the lower display bound.
        self.xlim = XLIMIT
        self.n = np.linspace(self.xlim - 1, 0, self.xlim)
        self.y = (self.n * 0.0) + YLOWER
        # The figure and axes
        self.fig = Figure(figsize=(12, 3), dpi=80)
        self.ax1 = self.fig.add_subplot(111)
        self.ax1.set_xlabel('Readings incremented time')
        self.ax1.set_ylabel('River Level - Raw data')
        # History line, recent-tail line and head marker
        self.line1 = Line2D([], [], color='blue', aa=True)
        self.line1_tail = Line2D([], [], color='red', linewidth=1)
        self.line1_head = Line2D([], [], color='red', marker='*', markeredgecolor='r')
        self.ax1.add_line(self.line1)
        self.ax1.add_line(self.line1_tail)
        self.ax1.add_line(self.line1_head)
        self.ax1.set_xlim(0, self.xlim - 1)  # x axis reversed via self.n
        self.ax1.set_ylim(YLOWER, YUPPER)
        FigureCanvas.__init__(self, self.fig)
        TimedAnimation.__init__(self, self.fig, interval=50, blit=True)

    def new_frame_seq(self):
        """Yield one frame index per buffer slot (TimedAnimation protocol)."""
        return iter(range(self.n.size))

    def _init_draw(self):
        """Blank all three artists before the animation starts."""
        for line in (self.line1, self.line1_tail, self.line1_head):
            line.set_data([], [])

    def addData(self, value):
        """Queue one sample; consumed by _draw_frame on the next redraw."""
        self.addedData.append(value)

    def zoomIn(self, value):
        """Narrow the y range by *value* on each side and redraw."""
        bottom, top = self.ax1.get_ylim()
        self.ax1.set_ylim(bottom + value, top - value)
        self.draw()

    def zoomOut(self, value):
        """Widen the y range by *value* on each side and redraw."""
        bottom, top = self.ax1.get_ylim()
        self.ax1.set_ylim(bottom - value, top + value)
        self.draw()

    def lessIn(self, value):
        """Shrink the x span by *value* samples and redraw."""
        left, right = self.ax1.get_xlim()
        self.ax1.set_xlim(left, right - value)
        self.draw()

    def moreOut(self, value):
        """Grow the x span by *value* samples and redraw."""
        left, right = self.ax1.get_xlim()
        self.ax1.set_xlim(left, right + value)
        self.draw()

    def _step(self, *args):
        # Extends TimedAnimation._step(): on a drawing error, count the
        # failure and stop the animation instead of letting it propagate.
        try:
            TimedAnimation._step(self, *args)
        except Exception:
            self.abc += 1
            print(str(self.abc))
            TimedAnimation._stop(self)

    def _draw_frame(self, framedata):
        margin = 2
        # Drain every queued sample into the rolling buffer.
        while len(self.addedData) > 0:
            self.y = np.roll(self.y, -1)
            self.y[-1] = self.addedData[0]
            del self.addedData[0]
        self.line1.set_data(self.n[0: self.n.size - margin], self.y[0: self.n.size - margin])
        self.line1_tail.set_data(np.append(self.n[-100:-1 - margin], self.n[-1 - margin]),
                                 np.append(self.y[-100:-1 - margin], self.y[-1 - margin]))
        self.line1_head.set_data(self.n[-1 - margin], self.y[-1 - margin])
        self._drawn_artists = [self.line1, self.line1_tail, self.line1_head]
class Communicate(QtCore.QObject):
    """Qt signal carrier used to hand sensor readings to the GUI thread."""
    # Emitted once per pressure reading; payload is the reading as a float.
    data_signal = QtCore.pyqtSignal(float)
def dataSendLoop(addData_callbackFunc):
    """Poll the MS5637 sensor forever and emit each pressure reading.

    Runs on a daemon thread; hands readings to the GUI thread through a Qt
    signal connected to *addData_callbackFunc* so plotting stays on the
    main thread.

    Fix: removed the vestigial `n` array and wrapping `i` counter that were
    computed but never used.
    """
    # Setup the signal-slot mechanism.
    mySrc = Communicate()
    mySrc.data_signal.connect(addData_callbackFunc)
    while True:
        pressure, _temperature = sensor.get_data()
        # ~10 readings per second including MS5637 conversion latency.
        time.sleep(SLEEP_FOR)
        mySrc.data_signal.emit(pressure)
if __name__ == '__main__':
    # Create the Qt application, apply the Plastique style, show the main
    # window and hand control to the Qt event loop.
    app = QtGui.QApplication(sys.argv)
    QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique'))
    myGUI = CustomMainWindow()
    sys.exit(app.exec_())
| mit |
PhonologicalCorpusTools/CorpusTools | corpustools/visualize.py | 1 | 3419 | import re
import numpy as np
import pdb
from scipy.cluster.hierarchy import linkage, dendrogram
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
import seaborn as sns
from sklearn.decomposition import PCA
def organize_data(reader, visualization_method, value_column, segment_column):
    """Reshape functional-load rows into (labels, data) for plotting.

    Each row's *segment_column* holds a string like ``"('X', 'Y')"``; the
    quoted segment names are extracted and keyed to the float in
    *value_column*.  For 'pca'/'hm' the result is a max-normalized square
    symmetric matrix; for 'hc' it is a condensed distance-like array
    (``max*2 - value`` so higher functional load means closer).
    """
    raw_data = {}
    for row in reader:
        quoted = re.findall("'.+?'", row[segment_column])
        pair = tuple(q[1:-1] for q in quoted)
        raw_data[pair] = float(row[value_column])
    all_segments = list({seg for pair in raw_data for seg in pair})

    def pair_value(s1, s2):
        # The pair may be stored under either ordering.
        try:
            return raw_data[(s1, s2)]
        except KeyError:
            return raw_data[(s2, s1)]

    if visualization_method in ('pca', 'hm'):
        size = len(all_segments)
        m = np.zeros((size, size), dtype=float)
        for i in range(size):
            for j in range(i + 1, size):
                m[i][j] = m[j][i] = pair_value(all_segments[i], all_segments[j])
        m /= np.amax(m)
        return (all_segments, m)
    elif visualization_method == 'hc':
        values = [pair_value(all_segments[i], all_segments[j])
                  for i in range(len(all_segments))
                  for j in range(i + 1, len(all_segments))]
        a = np.array(values, dtype=float)
        a = (max(a) * 2) - a
        return (all_segments, a)
def visualize(reader, visualization_method, value_column, segment_column):
    """Plot segment-pair functional load.

    :param reader: iterable of dict rows (e.g. a csv.DictReader)
    :param visualization_method: 'hc' (dendrogram), 'hm' (heatmap) or
        'pca' (scatter of the first two principal components)
    :param value_column: column holding the numeric functional-load value
    :param segment_column: column holding the "('X', 'Y')" segment pair

    Fix: removed the unused locals ``data_dict``, ``n`` and ``fig`` and the
    dead commented-out code.
    """
    labels, data = organize_data(reader, visualization_method, value_column, segment_column)
    if visualization_method == 'hc':
        link = linkage(data)
        dendrogram(link, leaf_label_func=lambda i: labels[i])
        ax = plt.axes()
        ax.set_title('Segment pair functional load: hierarchical clustering')
        plt.gcf()
        plt.show()
    if visualization_method == 'hm':
        ax = sns.heatmap(data)
        ax.set_title('Segment pair functional load: heatmap')
        plt.xticks([p + 0.5 for p in range(len(labels))], labels)
        plt.yticks([p + 0.5 for p in range(len(labels))], reversed(labels))
        plt.show()
    if visualization_method == 'pca':
        # Center the matrix, project onto two components, and label points.
        data -= data.mean()
        clf = PCA(n_components=2)
        transformed = clf.fit_transform(data)
        plt.figure(1)
        ax = plt.axes([0., 0., 1., 1.])
        ax.set_title('Segment pair functional load: first two principal components')
        plt.scatter(transformed[:, 0], transformed[:, 1], marker=',', c='b', s=0)
        for label, x, y in zip(labels, transformed[:, 0], transformed[:, 1]):
            plt.annotate(
                label,
                xy=(x, y), xytext=(0, 0),
                textcoords='offset points')
        plt.show()
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)

# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause

# NOTE(review): sklearn.gaussian_process.GaussianProcess and
# sklearn.cross_validation are legacy APIs removed in modern scikit-learn;
# this script targets the old interface.
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold

# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Instanciate a GP model: one anisotropic length-scale per feature (10),
# with a nugget to absorb the strong target noise.
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')

# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)

# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_  # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE

# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module
K = 20  # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
      % (K, R2))
| bsd-3-clause |
agrimaldi/metaseq | metaseq/colormap_adjust.py | 1 | 5162 | """
Module to handle custom colormaps.
`cmap_powerlaw_adjust`, `cmap_center_adjust`, and
`cmap_center_adjust` are from
https://sites.google.com/site/theodoregoetz/notes/matplotlib_colormapadjust
"""
import math
import copy
import numpy
import numpy as np
from matplotlib import pyplot, colors, cm
import matplotlib
import colorsys
def color_test(color):
    """Open a small figure whose background is filled with `color`.

    Useful for troubleshooting or experimenting with colors.  Array colors
    are flattened first so a 2-D single-color array is accepted.
    """
    facecolor = color.ravel() if isinstance(color, np.ndarray) else color
    pyplot.figure(figsize=(2, 2), facecolor=facecolor)
def smart_colormap(vmin, vmax, color_high='#b11902', hue_low=0.6):
    """
    Creates a "smart" colormap that is centered on zero, and accounts for
    asymmetrical vmin and vmax by matching saturation/value of high and low
    colors.

    It works by first creating a colormap from white to `color_high`. Setting
    this color to the max(abs([vmin, vmax])), it then determines what the color
    of min(abs([vmin, vmax])) should be on that scale. Then it shifts the
    color to the new hue `hue_low`, and finally creates a new colormap with the
    new hue-shifted as the low, `color_high` as the max, and centered on zero.

    :param color_high: a matplotlib color -- try "#b11902" for a nice red
    :param hue_low: float in [0, 1] -- try 0.6 for a nice blue
    :param vmin: lowest value in data you'll be plotting
        (assumed negative -- zeropoint below lands outside [0, 1] otherwise;
        TODO confirm with callers)
    :param vmax: highest value in data you'll be plotting
    """
    # first go from white to color_high
    orig_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
        'test', ['#FFFFFF', color_high], N=2048)
    # For example, say vmin=-3 and vmax=9. If vmin were positive, what would
    # its color be?
    vmin = float(vmin)
    vmax = float(vmax)
    mx = max([vmin, vmax])
    mn = min([vmin, vmax])
    frac = abs(mn / mx)
    rgb = orig_cmap(frac)[:-1]  # drop the alpha channel
    # Convert to HSV and shift the hue
    hsv = list(colorsys.rgb_to_hsv(*rgb))
    hsv[0] = hue_low
    new_rgb = colorsys.hsv_to_rgb(*hsv)
    # NOTE(review): new_hex is computed but never used.
    new_hex = matplotlib.colors.rgb2hex(new_rgb)
    # Fraction of the scale at which the data crosses zero.
    zeropoint = -vmin / (vmax - vmin)
    # Create a new colormap using the new hue-shifted color as the low end
    new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
        'test', [(0, new_rgb), (zeropoint, '#FFFFFF'), (1, color_high)],
        N=2048)
    return new_cmap
def cmap_powerlaw_adjust(cmap, a):
    """
    Returns a new colormap based on the one given
    but adjusted via power-law, `newcmap = oldcmap**a`.

    :param cmap: colormap instance (e.g., cm.jet)
    :param a: power; negative values return `cmap` unchanged

    Fixes: under Python 3 `map()` returns an iterator, so the original
    `cdict[key] = map(fn, ...)` followed by `.sort()` raised AttributeError;
    the assert condition was also inverted relative to its own message (and
    compared (x, y0, y1) tuples against ints).
    """
    if a < 0.:
        return cmap
    cdict = copy.copy(cmap._segmentdata)
    for key in ('red', 'green', 'blue'):
        # Raise each segment index to the power *a*, materialized and sorted
        # as LinearSegmentedColormap requires.
        cdict[key] = sorted((x[0] ** a, x[1], x[2]) for x in cdict[key])
        assert cdict[key][0][0] >= 0 and cdict[key][-1][0] <= 1, \
            "Resulting indices extend out of the [0, 1] segment."
    return colors.LinearSegmentedColormap('colormap', cdict, 1024)
def cmap_center_adjust(cmap, center_ratio):
    """
    Returns a new colormap based on the one given
    but adjusted so that the old center point sits higher
    (>0.5) or lower (<0.5).

    :param cmap: colormap instance (e.g., cm.jet)
    :param center_ratio: new position of the old midpoint, in (0, 1);
        values outside that open interval return `cmap` unchanged
    """
    if not 0. < center_ratio < 1.:
        return cmap
    # Exponent that maps index 0.5 onto center_ratio under a power law.
    exponent = math.log(center_ratio) / math.log(0.5)
    return cmap_powerlaw_adjust(cmap, exponent)
def cmap_center_point_adjust(cmap, range, center):
    """
    Converts `center` to a ratio within `range` and delegates to
    cmap_center_adjust(), returning the adjusted colormap.

    :param cmap: colormap instance
    :param range: tuple of (min, max); `center` must fall strictly inside,
        otherwise `cmap` is returned unchanged
    :param center: new cmap center
    """
    lo, hi = range[0], range[1]
    if not (lo < center < hi):
        return cmap
    ratio = abs(center - lo) / abs(hi - lo)
    return cmap_center_adjust(cmap, ratio)
if __name__ == '__main__':
    # Demo: plot the same 2-D function with the stock seismic colormap,
    # with its center raised, and with its center pinned to data value 0.

    def func3(x, y):
        # Classic matplotlib demo surface: mixed polynomial under a Gaussian.
        return (1 - x / 2 + x ** 5 + y ** 3) * numpy.exp(-x ** 2 - y ** 2)

    x = numpy.linspace(-3.0, 3.0, 60)
    y = numpy.linspace(-3.0, 3.0, 60)
    X, Y = numpy.meshgrid(x, y)
    Z = func3(X, Y)
    extent = [x[0], x[-1], y[0], y[-1]]
    plotkwargs = {
        'extent': extent,
        'origin': 'lower',
        'interpolation': 'nearest',
        'aspect': 'auto'}
    fig = pyplot.figure(figsize=(8, 3))
    fig.subplots_adjust(left=.05, bottom=.11, right=.94, top=.83, wspace=.35)
    ax = [fig.add_subplot(1, 3, i) for i in range(1, 4, 1)]
    cmap = cm.seismic
    # Panel 1: unmodified colormap.
    plt = ax[0].imshow(Z, cmap=cmap, **plotkwargs)
    cb = ax[0].figure.colorbar(plt, ax=ax[0])
    ax[0].set_title('cmap: seismic')
    # Panel 2: midpoint shifted to 0.75.
    plt = ax[1].imshow(Z, cmap=cmap_center_adjust(cmap, 0.75), **plotkwargs)
    cb = ax[1].figure.colorbar(plt, ax=ax[1])
    ax[1].set_title('center raised by 25%')
    # Panel 3: midpoint pinned to the data value 0.
    plt = ax[2].imshow(
        Z,
        cmap=cmap_center_point_adjust(
            cmap, [numpy.min(Z), numpy.max(Z)], 0),
        **plotkwargs)
    cb = ax[2].figure.colorbar(plt, ax=ax[2])
    ax[2].set_title('center set to 0')
    pyplot.show()
| mit |
vital-ai/beaker-notebook | plugin/ipythonPlugins/src/dist/python3/beaker_runtime3.py | 1 | 19742 | # Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Refuse to run under Python 2.  The marker token in the message is kept
# verbatim (presumably recognized by the Beaker launcher -- verify before
# changing it).
if sys.version_info < (3,0):
    raise RuntimeError('Python2 was found when trying to start Python3. _beaker_python_mismatch_')
import os, json, pandas, numpy
import urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse, IPython, datetime, calendar, math, traceback, time
from IPython.utils.traitlets import Unicode
class OutputContainer:
    """Ordered collection of display items shown together in one output cell."""

    def __init__(self):
        self.items = []

    def clear(self):
        """Forget all stored items (rebinds the list, as before)."""
        self.items = []

    def addItem(self, obj):
        """Append *obj* to the end of the container."""
        self.items.append(obj)

    def getItems(self):
        """Return the live backing list of items."""
        return self.items
class BeakerCodeCell:
    """One notebook code cell: identity, source code, and evaluation output.

    Only *cellId* and *evaluatorId* are set at construction; the remaining
    fields start empty (output starts as None) and are filled in later.
    """

    def __init__(self, cellId, evaluatorId):
        self.cellId = cellId
        self.evaluatorId = evaluatorId
        self.code = ''
        self.outputtype = ''
        self.output = None
        self.tags = ''

    def getCellId(self):
        """Return the cell's unique identifier."""
        return self.cellId

    def getEvaluatorId(self):
        """Return the identifier of the evaluator that runs this cell."""
        return self.evaluatorId

    def getCode(self):
        """Return the cell's source code string."""
        return self.code

    def getOutputType(self):
        """Return the declared type of the cell's output."""
        return self.outputtype

    def getOutput(self):
        """Return the cell's output value (None until evaluated)."""
        return self.output

    def getTags(self):
        """Return the cell's tag string."""
        return self.tags
def convertTypeName(typ):
    """Map a Python/numpy/pandas type name to Beaker's table column type.

    Anything not recognized as float/integer/boolean/datetime falls back to
    "string".
    """
    if typ.startswith("float"):
        return "double"
    if typ.startswith(("int", "uint", "short", "ushort", "long", "ulong")):
        return "integer"
    if typ.startswith("bool"):
        return "boolean"
    if typ.startswith(("date", "Time")):
        return "datetime"
    return "string"
def isPrimitiveType(typ):
    """True if type name *typ* maps to a Beaker primitive (number, bool,
    datetime or string); False for containers and everything else."""
    primitive_prefixes = ("float",
                          "int", "uint", "short", "ushort", "long", "ulong",
                          "bool",
                          "date", "Time",
                          "str")
    return typ.startswith(primitive_prefixes)
def isListOfMaps(data):
    """True if *data* is a list of dicts whose values are all primitive.

    Uses exact ``type`` checks (not isinstance), matching the original:
    subclasses of list/dict do not qualify.
    """
    if type(data) != list:
        return False
    return all(
        type(row) == dict and
        all(isPrimitiveType(type(v).__name__) for v in row.values())
        for row in data
    )
def isDictionary(data):
    """True if *data* is a plain dict whose values are all primitive."""
    if type(data) != dict:
        return False
    return all(isPrimitiveType(type(v).__name__) for v in data.values())
def transformNaN(obj):
    """Return *obj* with a non-finite float replaced by its JSON-safe name.

    Non-floats and finite floats pass through unchanged.

    Fix: NaN now maps to "NaN" (the original returned the misspelled "Nan",
    which transformNaNs never produced and fixNaNBack could not round-trip).
    """
    if not isinstance(obj, float):
        return obj
    if math.isnan(obj):
        return "NaN"
    if math.isinf(obj):
        return "Infinity" if obj > 0 else "-Infinity"
    return obj
def transformNaNs(obj):
    """Replace, in place, every non-finite float in list *obj* with its
    JSON-safe string name ("NaN", "Infinity", "-Infinity")."""
    for idx, item in enumerate(obj):
        if not isinstance(item, float):
            continue
        if math.isnan(item):
            obj[idx] = "NaN"
        elif math.isinf(item):
            obj[idx] = "Infinity" if item > 0 else "-Infinity"
def fixNaNBack(obj):
    """Inverse of transformNaNs' encoding for one value: turn the strings
    "NaN"/"Infinity"/"-Infinity" back into floats; pass everything else
    through unchanged."""
    if not isinstance(obj, str):
        return obj
    specials = {"NaN": float('nan'),
                "Infinity": float('inf'),
                "-Infinity": float('-inf')}
    return specials.get(obj, obj)
def fixNaNsBack(obj):
    """In place, turn the strings "NaN"/"Infinity"/"-Infinity" in list *obj*
    back into their float values; other elements are left alone."""
    for idx, item in enumerate(obj):
        if not isinstance(item, str):
            continue
        if item == "NaN":
            obj[idx] = float('nan')
        elif item == "Infinity":
            obj[idx] = float('inf')
        elif item == "-Infinity":
            obj[idx] = float('-inf')
def transform(obj):
    """Recursively convert *obj* to Beaker's JSON-friendly display form.

    Homogeneous list-of-dicts and primitive-valued dicts become TableDisplay
    records; other dicts/lists recurse via transformNR; OutputContainer and
    BeakerCodeCell become tagged records; everything else passes through
    transformNaN.
    """
    if type(obj) == bytes:
        return str(obj)
    if isListOfMaps(obj):
        # Render as a table: union of all keys as columns, '' for holes.
        out = {}
        out['type'] = "TableDisplay"
        out['subtype'] = "ListOfMaps"
        cols = []
        for l in obj:
            cols.extend(l.keys())
        # Deduplicate; note set() makes the column order unspecified.
        cols = list(set(cols))
        out['columnNames'] = cols
        vals = []
        for l in obj:
            row = []
            for r in cols:
                if r in l:
                    row.append(transform(l[r]))
                else:
                    row.append('')
            vals.append(row)
        out['values'] = vals
        return out
    if isDictionary(obj):
        # Primitive-valued dict -> two-column Key/Value table.
        out = {}
        out['type'] = "TableDisplay"
        out['subtype'] = "Dictionary"
        out['columnNames'] = ["Key", "Value"]
        values = []
        for k, v in obj.items():
            values.append([k, transform(v)])
        out['values'] = values
        return out
    if type(obj) == dict:
        # Mixed-value dict: recurse without re-testing for table shapes.
        out = {}
        for k, v in obj.items():
            out[k] = transformNR(v)
        return out
    if type(obj) == list:
        out = []
        for v in obj:
            out.append(transformNR(v))
        return out
    if isinstance(obj, OutputContainer):
        out = {}
        out['type'] = "OutputContainer"
        items = []
        for v in obj.getItems():
            items.append(transform(v))
        out['items'] = items
        return out
    if isinstance(obj, BeakerCodeCell):
        out = {}
        out['type'] = "BeakerCodeCell"
        out['cellId'] = obj.getCellId()
        out['evaluatorId'] = obj.getEvaluatorId()
        out['code'] = obj.getCode()
        out['outputtype'] = obj.getOutputType()
        out['output'] = transformNR(obj.getOutput())
        out['tags'] = obj.getTags()
        return out
    return transformNaN(obj)
def transformNR(obj):
    """Non-root variant of transform(): identical except that dicts and
    lists are never promoted to TableDisplay records (nested containers
    recurse as plain JSON structures)."""
    if type(obj) == bytes:
        return str(obj)
    if type(obj) == dict:
        out = {}
        for k, v in obj.items():
            out[k] = transformNR(v)
        return out
    if type(obj) == list:
        out = []
        for v in obj:
            out.append(transformNR(v))
        return out
    if isinstance(obj, OutputContainer):
        # Items inside a container are treated as roots again.
        out = {}
        out['type'] = "OutputContainer"
        items = []
        for v in obj.getItems():
            items.append(transform(v))
        out['items'] = items
        return out
    if isinstance(obj, BeakerCodeCell):
        out = {}
        out['type'] = "BeakerCodeCell"
        out['cellId'] = obj.getCellId()
        out['evaluatorId'] = obj.getEvaluatorId()
        out['code'] = obj.getCode()
        out['outputtype'] = obj.getOutputType()
        out['output'] = transformNR(obj.getOutput())
        out['tags'] = obj.getTags()
        return out
    return transformNaN(obj)
def transformBack(obj):
    """Inverse of transform(): rebuild Python objects from Beaker's JSON
    form.

    Tagged dicts ('BeakerCodeCell', 'OutputContainer', 'Date',
    'TableDisplay' and its subtypes) are reconstituted into cells,
    containers, datetimes, dicts/Series, numpy matrices, lists of dicts or
    pandas DataFrames; untagged dicts and lists recurse; other values pass
    through.
    """
    if type(obj) == dict:
        out = {}
        for k, v in obj.items():
            # Keys are forced to str; values are rebuilt recursively first.
            out[str(k)] = transformBack(v)
        if "type" in out:
            if out['type'] == "BeakerCodeCell":
                c = BeakerCodeCell(out['cellId'], out['evaluatorId'])
                if 'code' in out:
                    c.code = out['code']
                if 'outputtype' in out:
                    c.outputtype = out['outputtype']
                if 'output' in out:
                    c.output = transformBack(out['output'])
                if 'tags' in out:
                    c.tags = out['tags']
                return c
            if out['type'] == "OutputContainer":
                c = OutputContainer()
                if 'items' in out:
                    for i in out['items']:
                        c.addItem(i)
                return c;
            if out['type'] == "Date":
                # Timestamps travel as epoch milliseconds.
                return datetime.datetime.fromtimestamp(out["timestamp"]/1000)
            if out['type'] == "TableDisplay":
                if 'subtype' in out:
                    if out['subtype'] == "Dictionary":
                        out2 = {}
                        for r in out['values']:
                            out2[r[0]] = fixNaNBack(r[1])
                        # An "Index" first column marks a serialized Series.
                        if out['columnNames'][0] == "Index":
                            return pandas.Series(out2)
                        return out2
                    if out['subtype'] == "Matrix":
                        vals = out['values']
                        fixNaNsBack(vals)
                        return numpy.matrix(vals)
                    if out['subtype'] == "ListOfMaps":
                        out2 = []
                        cnames = out['columnNames']
                        for r in out['values']:
                            out3 = {}
                            for i in range(len(cnames)):
                                # '' encodes a missing key for this row.
                                if r[i] != '':
                                    out3[cnames[i]] = r[i]
                            out2.append(out3)
                        return out2
                # transform to dataframe
                if ('hasIndex' in out) and (out['hasIndex'] == "true"):
                    # first column becomes the index
                    vals = out['values']
                    cnames = out['columnNames'][1:]
                    index = []
                    for x in range(0, len(vals)):
                        index.append(transformBack(vals[x][0]))
                        v = vals[x][1:]
                        fixNaNsBack(v)
                        vals[x] = v
                    return pandas.DataFrame(data=vals, columns=cnames, index=index)
                else:
                    vals = out['values']
                    cnames = out['columnNames']
                    for x in range(0, len(vals)):
                        v = vals[x]
                        fixNaNsBack(v)
                        vals[x] = v
                    return pandas.DataFrame(data=vals, columns=cnames)
        return out
    if type(obj) == list:
        out = []
        for v in obj:
            out.append(transformBack(v))
        return out
    try:
        if type(obj) == bytes:
            obj = str(obj)
    except Exception as e:
        # Best-effort decode only; keep the original object on failure.
        return obj
    return obj
# should be inner class to Beaker
class DataFrameEncoder(json.JSONEncoder):
    """JSON encoder for numpy scalars/arrays, datetimes, and pandas
    DataFrame/Series, emitting Beaker's tagged display records."""

    def default(self, obj):
        # similarly handle Panels.
        # make this extensible by the user to handle their own types.
        if isinstance(obj, numpy.generic):
            # numpy scalar -> native Python value (NaN/inf made JSON-safe).
            return transformNaN(obj.item())
        if isinstance(obj, numpy.ndarray) and obj.ndim == 2:
            # 2-D array -> Matrix table with synthetic column names c0..cN.
            out = {}
            out['type'] = "TableDisplay"
            out['subtype'] = "Matrix"
            cols = []
            for i in range(obj.shape[1]):
                cols.append("c" + str(i))
            out['columnNames'] = cols
            vars = obj.tolist()
            for x in range(0, len(vars)):
                transformNaNs(vars[x])
            out['values'] = vars
            return out
        if isinstance(obj, numpy.ndarray):
            # Any other ndarray -> plain (nested) list.
            ret = obj.tolist()
            transformNaNs(ret)
            return ret
        if type(obj) == datetime.datetime or type(obj) == datetime.date or type(obj).__name__ == 'Timestamp':
            # Dates travel as epoch milliseconds.
            out = {}
            out['type'] = "Date"
            out['timestamp'] = calendar.timegm(obj.timetuple()) * 1000
            return out
        if type(obj) == pandas.core.frame.DataFrame:
            # DataFrame -> TableDisplay with the index as first column.
            out = {}
            out['type'] = "TableDisplay"
            out['subtype'] = "TableDisplay"
            out['hasIndex'] = "true"
            out['columnNames'] = ['Index'] + obj.columns.tolist()
            vals = obj.values.tolist()
            idx = obj.index.tolist()
            for x in range(0, len(vals)):
                vals[x] = [idx[x]] + vals[x]
            # Column types are inferred from the first row only.
            ty = []
            num = len(obj.columns.tolist())
            # NOTE(review): stray statement -- x is immediately rebound by the
            # for loop below.
            x = 0;
            for x in range(0, num+1):
                ty.append(convertTypeName(type(vals[0][x]).__name__))
            out['types'] = ty
            for x in range(0, len(vals)):
                transformNaNs(vals[x])
            out['values'] = vals
            return out
        if type(obj) == pandas.core.series.Series:
            # Series of primitives -> Index/Value table; otherwise a dict.
            basict = True
            for i in range(len(obj)):
                if not isPrimitiveType(type(obj[i]).__name__):
                    basict = False
                    break
            if basict:
                out = {}
                out['type'] = "TableDisplay"
                out['subtype'] = "Dictionary"
                out['columnNames'] = ["Index", "Value"]
                values = []
                for k, v in obj.items():
                    values.append([k, transform(v)])
                out['values'] = values
                return out
            return obj.to_dict()
        return json.JSONEncoder.default(self, obj)
class MyJSONFormatter(IPython.core.formatters.BaseFormatter):
    """IPython display formatter producing an 'application/json' mimebundle
    via transform() + DataFrameEncoder."""
    format_type = Unicode('application/json')

    def __call__(self, obj):
        try:
            obj = transform(obj)
            return json.dumps(obj, cls=DataFrameEncoder)
        except Exception as e:
            # Returning None tells IPython this formatter has no output for
            # *obj*, letting other formatters handle it.
            return None
class Beaker:
    """Runtime support for Python code in Beaker.

    Talks to the Beaker core REST server (authenticated over HTTP basic
    auth) to read/write notebook namespace variables and to drive notebook
    control operations.  Attribute access is proxied to the notebook
    namespace via ``__getattr__``/``__setattr__``.
    """
    session_id = ''
    core_url = '127.0.0.1:' + os.environ['beaker_core_port']
    _beaker_password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    _beaker_password_mgr.add_password(None, core_url, 'beaker',
                                     os.environ['beaker_core_password'])
    _beaker_url_opener = urllib.request.build_opener(
        urllib.request.HTTPBasicAuthHandler(_beaker_password_mgr),
        urllib.request.ProxyHandler({}))

    def _post(self, path, args):
        # POST form-encoded args to the given REST path; return raw bytes.
        req = urllib.request.Request(
            'http://' + self.core_url + path,
            urllib.parse.urlencode(args).encode('utf8'))
        return self._beaker_url_opener.open(req).read()

    def _get_raw(self, path, args):
        # GET the given REST path with query args; return raw bytes.
        req = urllib.request.Request(
            'http://' + self.core_url + path + '?' +
            urllib.parse.urlencode(args))
        return self._beaker_url_opener.open(req).read()

    def _ctrl_bool(self, endpoint, args):
        # POST to a notebookctrl endpoint and interpret the reply as a bool.
        # BUG FIX: the reply is bytes; comparing bytes to the str "true" is
        # always False under Python 3, so decode before comparing.
        reply = self._post('/rest/notebookctrl/' + endpoint, args)
        return reply.decode('utf-8') == 'true'

    def set4(self, var, val, unset, sync):
        """Set (or unset) a notebook namespace variable.

        Raises NameError if the core server does not reply 'ok'.
        """
        args = {'name': var, 'session': self.session_id, 'sync': sync}
        if not unset:
            args['value'] = json.dumps(transform(val), cls=DataFrameEncoder)
        reply = self._post('/rest/namespace/set', args).decode('utf-8')
        if reply != 'ok':
            raise NameError(reply)

    def get(self, var):
        """Get a notebook namespace variable; NameError if undefined."""
        reply = self._get_raw('/rest/namespace/get',
                              {'name': var, 'session': self.session_id})
        result = json.loads(reply.decode())
        if not result['defined']:
            raise NameError('name \'' + var + '\' is not defined in notebook namespace')
        return transformBack(result['value'])

    def set_session(self, id):
        self.session_id = id

    def register_output(self):
        """Install the JSON display formatter into the current IPython shell."""
        ip = IPython.InteractiveShell.instance()
        ip.display_formatter.formatters['application/json'] = MyJSONFormatter(parent=ip.display_formatter)

    def set(self, var, val):
        return self.set4(var, val, False, True)

    def createOutputContainer(self):
        return OutputContainer()

    def showProgressUpdate(self):
        return "WARNING: python3 language plugin does not support progress updates"

    def evaluate(self, filter):
        """Evaluate all notebook cells matching *filter*; return the result."""
        reply = self._post('/rest/notebookctrl/evaluate',
                           {'filter': filter, 'session': self.session_id})
        return transformBack(json.loads(reply.decode()))

    def evaluateCode(self, evaluator, code):
        """Evaluate *code* with the named evaluator; return the result."""
        reply = self._post('/rest/notebookctrl/evaluateCode',
                           {'evaluator': evaluator, 'code': code,
                            'session': self.session_id})
        return transformBack(json.loads(reply.decode()))

    def showStatus(self, msg):
        return self._ctrl_bool('showStatus',
                               {'msg': msg, 'session': self.session_id})

    def clearStatus(self, msg):
        return self._ctrl_bool('clearStatus',
                               {'msg': msg, 'session': self.session_id})

    def showTransientStatus(self, msg):
        return self._ctrl_bool('showTransientStatus',
                               {'msg': msg, 'session': self.session_id})

    def getEvaluators(self):
        """List the evaluators available in the current session."""
        reply = self._get_raw('/rest/notebookctrl/getEvaluators',
                              {'session': self.session_id})
        return transformBack(json.loads(reply.decode()))

    def getCodeCells(self, filter):
        """List code cells matching *filter* in the current session."""
        reply = self._get_raw('/rest/notebookctrl/getCodeCells',
                              {'session': self.session_id, 'filter': filter})
        return transformBack(json.loads(reply.decode()))

    def setCodeCellBody(self, name, body):
        return self._ctrl_bool('setCodeCellBody',
                               {'name': name, 'body': body,
                                'session': self.session_id})

    def setCodeCellEvaluator(self, name, evaluator):
        return self._ctrl_bool('setCodeCellEvaluator',
                               {'name': name, 'evaluator': evaluator,
                                'session': self.session_id})

    def setCodeCellTags(self, name, tags):
        return self._ctrl_bool('setCodeCellTags',
                               {'name': name, 'tags': tags,
                                'session': self.session_id})

    def __setattr__(self, name, value):
        # 'session_id' is local plumbing; any other attribute assignment is
        # published to the notebook namespace.
        if 'session_id' == name:
            self.__dict__['session_id'] = value
            return
        return self.set(name, value)

    def __getattr__(self, name):
        # Unknown attributes are fetched from the notebook namespace.
        return self.get(name)
| apache-2.0 |
RNAer/Calour | calour/database.py | 1 | 18564 | '''
database access functions (:mod:`calour.database`)
==================================================
.. currentmodule:: calour.database
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated
add_terms_to_features
enrichment
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
from abc import ABC
import importlib
from .util import get_config_value, get_config_file, get_config_sections
from .experiment import Experiment
logger = getLogger(__name__)
def _get_database_class(dbname, exp=None, config_file_name=None):
    '''Get the database interface class for the given database name.

    The database name is looked up as a section in the calour config file
    (calour.config); the section must provide the ``class_name`` and
    ``module_name`` keys identifying the interface implementation.

    Parameters
    ----------
    dbname : str
        the database name. common options are:
            'dbbact' : the amplicon sequence manual annotation database
            'spongeworld' : the sponge microbiome database
            'redbiome' : the qiita automatic amplicon sequence database
        Names are listed in the calour.config file as section names
    exp : Experiment or None, optional
        the experiment to link to the database interface
    config_file_name: str or None, optional
        None (default) to use the default calour config file.
        str to use the file named str as the config file

    Returns
    -------
    calour.database.Database or None
        A ``Database`` instance for the requested dbname, or None if the
        database section or its python module is not available (a warning
        is logged in that case).
    '''
    class_name = get_config_value('class_name', section=dbname, config_file_name=config_file_name)
    module_name = get_config_value('module_name', section=dbname, config_file_name=config_file_name)
    min_version = float(get_config_value('min_version', section=dbname, config_file_name=config_file_name, fallback='0.0'))
    module_website = get_config_value('website', section=dbname, config_file_name=config_file_name, fallback='NA')
    if class_name is not None and module_name is not None:
        try:
            # import the database module
            db_module = importlib.import_module(module_name)
        except ImportError:
            module_installation = get_config_value('installation', section=dbname, config_file_name=config_file_name)
            logger.warning('Database interface %s not installed.\nSkipping.\n'
                           'You can install the database using:\n%s\n'
                           'For details see: %s' % (module_name, module_installation, module_website))
            return None
        # get the class and instantiate it, linked to the experiment
        DBClass = getattr(db_module, class_name)
        cdb = DBClass(exp)
        # test if the installed database module version is compatible
        if min_version > 0:
            db_version = cdb.version()
            if db_version < min_version:
                logger.warning('Please update %s database module. Current version (%f) not supported (minimal version %f).\nFor details see %s' % (dbname, db_version, min_version, module_website))
        return cdb
    # not found, so warn and list the available database names
    databases = []
    sections = get_config_sections()
    for csection in sections:
        class_name = get_config_value('class_name', section=csection, config_file_name=config_file_name)
        # BUG FIX: this previously queried 'class_name' twice; look up the
        # 'module_name' key so only fully-specified sections are listed.
        module_name = get_config_value('module_name', section=csection, config_file_name=config_file_name)
        if class_name is not None and module_name is not None:
            databases.append(csection)
    if len(databases) == 0:
        logger.warning('calour config file %s does not contain any database sections. Skipping' % get_config_file())
        return None
    logger.warning('Database %s not found in config file (%s).\nSkipping.\n'
                   'Current databases in config file: %s' % (dbname, get_config_file(), databases))
    return None
def add_terms_to_features(exp: Experiment, dbname, use_term_list=None, field_name='common_term', term_type=None, ignore_exp=None, **kwargs):
    '''Annotate each feature with its most common database term.

    Queries database ``dbname`` for per-feature term scores and stores the
    highest scoring term of each feature in a new feature_metadata column.
    Annotations are added in-place.  Features without any terms get 'na';
    when ``use_term_list`` is supplied, features whose known terms all fall
    outside the list get 'other'.

    Parameters
    ----------
    dbname : str
        name of the database to query (a section name in the calour config file)
    use_term_list : list of str or None, optional
        Use only terms appearing in this list
        None (default) to use all terms
    field_name : str, optional
        Name of the feature_metadata field in which to store the annotations.
    term_type : str or None, optional
        type of the annotation summary to get from the database (db specific)
        None to get default type
    ignore_exp : list of int or None, optional
        list of experiments to ignore when adding the terms
    **kwargs : dict
        database specific additional parameters (see the database interface
        get_feature_terms() function for details)

    Returns
    -------
    Experiment
        with the feature_metadata field containing the most common database
        term for each feature
    '''
    logger.debug('Adding terms to features for database %s' % dbname)
    db = _get_database_class(dbname, exp)
    features = exp.feature_metadata.index.values
    logger.debug('found %d features' % len(features))
    # per-feature {term: score} dicts from the database
    term_list = db.get_feature_terms(features, exp=exp, term_type=term_type, ignore_exp=ignore_exp, **kwargs)
    logger.debug('got %d terms from database' % len(term_list))

    def _best_term(scores):
        # Pick the highest scoring term for one feature: 'na' when nothing
        # is known, 'other' when restricted to use_term_list and none of
        # the listed terms has a positive score.
        if not scores:
            return 'na'
        if use_term_list is None:
            return max(scores, key=scores.get)
        winner, best = 'other', 0
        for candidate in use_term_list:
            if scores.get(candidate, 0) > best:
                winner, best = candidate, scores[candidate]
        return winner

    exp.feature_metadata[field_name] = [_best_term(term_list.get(cfeature, {}))
                                        for cfeature in features]
    return exp
def enrichment(exp: Experiment, features, dbname, *args, **kwargs):
    '''Find annotation terms enriched in ``features`` relative to all other features in ``exp``.

    Delegates to the database-specific enrichment analysis implementation.

    Parameters
    ----------
    features : list of str
        The features to test for enrichment (compared to all other features in exp)
    dbname : str
        the database to use for the annotation terms and enrichment analysis
    *args : tuple
    **kwargs : dict
        Additional database specific parameters (see the per-database module
        documentation for its .enrichment() method)

    Returns
    -------
    pandas.DataFrame with info about significantly enriched terms. The columns include:
        * 'feature' : the feature ID (str)
        * 'pval' : the p-value for the enrichment (float)
        * 'odif' : the effect size (float)
        * 'observed' : the number of observations of this term in group1 (int)
        * 'expected' : the expected (based on all features) number of observations of this term in group1 (float)
        * 'frac_group1' : fraction of total terms in group 1 which are the specific term (float)
        * 'frac_group2' : fraction of total terms in group 2 which are the specific term (float)
        * 'num_group1' : number of total terms in group 1 which are the specific term (float)
        * 'num_group2' : number of total terms in group 2 which are the specific term (float)
        * 'description' : the term (str)
    numpy.Array where rows are features (ordered like the dataframe), columns are terms, and value is score
        for term in feature
    pandas.DataFrame with info about the features used. columns:
        * 'group' : int, the group (1/2) to which the feature belongs
        * 'sequence': str

    Raises
    ------
    ValueError
        if the database interface does not support enrichment analysis
    '''
    db = _get_database_class(dbname, exp=exp)
    if db.can_do_enrichment:
        return db.enrichment(exp, features, *args, **kwargs)
    raise ValueError('database %s does not support enrichment analysis' % dbname)
class Database(ABC):
    """Abstract base class for calour database interfaces.

    A concrete interface declares which integration levels it implements via
    the ``methods`` constructor argument ('get', 'annotate', 'enrichment').
    The methods defined here are inert default stubs which subclasses
    override for the levels they support.
    """

    def __init__(self, exp=None, database_name=None, methods=['get', 'annotate', 'enrichment']):
        '''Initialize the database interface.

        Parameters
        ----------
        exp : Experiment or None, optional
            The experiment link for the database (if needed)
        database_name : str, optional
            name of the database (for showing errors, etc.)
        methods : list of str, optional
            The integration levels this database interface supports:
            'get' if it supports get_seq_annotation_strings()
            'annotate' if it supports add_annotation()
            'enrichment' if it supports get_feature_terms()
        '''
        self.database_name = database_name
        self._methods = set(methods)
        self._exp = exp

    @property
    def annotatable(self):
        '''True if the database supports adding annotations via add_annotation().'''
        return 'annotate' in self._methods

    @property
    def can_do_enrichment(self):
        '''True if the database supports per-feature term dicts via get_feature_terms().'''
        return 'enrichment' in self._methods

    def get_seq_annotation_strings(self, feature):
        '''Get short, user readable annotation summaries for one feature.

        Parameters
        ----------
        feature : str
            the feature ID to query the database about

        Returns
        -------
        list of (dict, str) tuples
            Each tuple is (annotationdetails, annotationsummary):
            annotationdetails holds database specific keys (e.g.
            'annotationid' : int, 'annotationtype' : str, ...) and
            annotationsummary is a short string displayed to the user.
            This stub returns no annotations.
        '''
        logger.debug('Generic function for get_annotation_strings')
        return []

    def get_annotation_website(self, annotation):
        '''Get the database website address with details about an annotation.

        Used by the Jupyter GUI to open a new browser tab when the user
        clicks an annotation in the list.

        Parameters
        ----------
        annotation : dict
            database specific keys/values (supplied by the interface via
            get_seq_annotation_strings()) identifying the annotation.

        Returns
        -------
        str or None
            The web address of the html page with details about the
            annotation, or None if not available.
        '''
        logger.debug('Generic function for get_annotation_website')
        return None

    def show_annotation_info(self, annotation):
        '''Show details about an annotation using a database supplied GUI.

        Called from the qt5 heatmap GUI when double clicking a database
        annotation; a common implementation opens a browser window with
        details about the annotation.

        Parameters
        ----------
        annotation : dict
            database specific keys/values identifying the annotation.
        '''
        logger.debug('Generic function for show annotation info')
        return

    def add_annotation(self, features, exp):
        '''Add a database entry about a set of features (optional, level L4).

        Support is indicated by the "annotate" method in __init__().
        Called from the qt5 heatmap GUI "Annotate" button; all GUI is
        supplied by the database interface.

        Parameters
        ----------
        features : list of str
            the features to add to the database
        exp : Experiment
            the experiment where the features are coming from

        Returns
        -------
        str
            empty if ok, otherwise the error encountered
        '''
        logger.debug('Generic function for add_annotations')
        raise NotImplementedError

    def update_annotation(self, annotation, exp=None):
        '''Update an existing annotation (optional, level L4).

        Called when right clicking an annotation in the qt5 GUI and
        selecting "update"; all GUI is supplied by the database interface.

        Parameters
        ----------
        annotation : dict
            The annotation to update (keys/values are database specific)
        exp : Experiment, optional
            The calour experiment from which the annotation is coming from

        Returns
        -------
        str
            empty if ok, otherwise the error encountered
        '''
        logger.debug('Generic function for update_annotation')
        raise NotImplementedError

    def delete_annotation(self, annotation_details):
        '''Delete an annotation from the database, if allowed (optional, level L4).

        All features associated with the annotation lose it.  Called when
        right clicking an annotation in the qt5 GUI and selecting "delete".

        Parameters
        ----------
        annotation_details : dict
            Details identifying the annotation (the annotationdetails from
            get_seq_annotation_strings()); should contain a unique
            database identifier for the annotation.

        Returns
        -------
        str
            empty if ok, otherwise the error encountered
        '''
        logger.debug('Generic function for delete_annotation')
        return 'Not implemented'

    def remove_feature_from_annotation(self, features, annotation_details):
        '''Remove features from an annotation, if allowed (optional, level L4).

        If no features remain afterwards, the annotation itself is removed
        from the database; otherwise it is kept for the remaining features.
        Called when right clicking an annotation in the qt5 GUI and
        selecting "remove feature".

        Parameters
        ----------
        features : list of str
            The feature ids to remove
        annotation_details : dict
            Details identifying the annotation (the annotationdetails from
            get_seq_annotation_strings()); should contain a unique
            database identifier for the annotation.

        Returns
        -------
        str
            empty if ok, otherwise the error encountered
        '''
        logger.debug('Generic function for remove_features_from_annotation')
        return 'Not implemented'

    def get_feature_terms(self, features, exp=None):
        '''Get the per-feature term scores.

        Parameters
        ----------
        features : list of str
            the features to get the terms for
        exp : Experiment, optional
            not None to store results in the exp (to save time for
            multiple queries)

        Returns
        -------
        dict
            Key is the feature (str); value is a dict mapping each term
            (str) to its score in this feature.  This stub returns no
            terms.
        '''
        logger.debug('Generic function for get_feature_terms')
        return {}

    def enrichment(self, exp, features, *args, **kwargs):
        '''Get terms enriched in ``features`` vs. all features in ``exp`` (optional, level L3).

        Parameters
        ----------
        exp : Experiment
            The experiment to compare the features to
        features : list of str
            The features (from exp) to test for enrichment
        *args : tuple
        **kwargs : dict
            Additional database specific parameters

        Returns
        -------
        pandas.DataFrame or None
            Columns include 'feature' (str), 'pval' (float, the p-value
            for the enrichment), 'odif' (float, the effect size) and
            'term' (str, the enriched term).  This stub returns None.
        '''
        logger.debug('Generic function for enrichment')
        return None

    def show_term_details(self, term, exp, features, *args, **kwargs):
        '''Show why ``term`` was denoted as enriched (optional, level L3).

        Called when a user double clicks an enriched term in the qt5
        enrichment analysis; the implementation is database specific.

        Parameters
        ----------
        term : str
            The term to get the details for
        exp : Experiment
            The calour experiment for showing the term details in
        features : list of str
            The features in the experiment for which to show the term details
        '''
        logger.debug('Generic function for term details')
        return None
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/feature_selection/__init__.py | 33 | 1159 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
# Public API of sklearn.feature_selection: names re-exported from the
# submodules imported above.
__all__ = ['GenericUnivariateSelect',
           'RFE',
           'RFECV',
           'SelectFdr',
           'SelectFpr',
           'SelectFwe',
           'SelectKBest',
           'SelectPercentile',
           'VarianceThreshold',
           'chi2',
           'f_classif',
           'f_oneway',
           'f_regression',
           'SelectFromModel']
| bsd-3-clause |
moble/sympy | sympy/plotting/plot.py | 55 | 64797 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
    """Globally disable ``Plot.show()`` (used when running tests/doctests)."""
    global _show
    _show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
    """The central class of the plotting module.

    For interactive work the function ``plot`` is better suited.

    This class permits the plotting of sympy expressions using numerous
    backends (matplotlib, textplot, the old pyglet module for sympy, Google
    charts api, etc).

    The figure can contain an arbitrary number of plots of sympy expressions,
    lists of coordinates of points, etc. Plot has a private attribute _series that
    contains all data series to be plotted (expressions for lines or surfaces,
    lists of points, etc (all subclasses of BaseSeries)). Those data series are
    instances of classes not imported by ``from sympy import *``.

    The customization of the figure is on two levels. Global options that
    concern the figure as a whole (eg title, xlabel, scale, etc) and
    per-data series options (eg name) and aesthetics (eg. color, point shape,
    line type, etc.).

    The difference between options and aesthetics is that an aesthetic can be
    a function of the coordinates (or parameters in a parametric plot). The
    supported values for an aesthetic are:
    - None (the backend uses default values)
    - a constant
    - a function of one variable (the first coordinate or parameter)
    - a function of two variables (the first and second coordinate or
    parameters)
    - a function of three variables (only in nonparametric 3D plots)
    Their implementation depends on the backend so they may not work in some
    backends.

    If the plot is parametric and the arity of the aesthetic function permits
    it the aesthetic is calculated over parameters and not over coordinates.
    If the arity does not permit calculation over parameters the calculation is
    done over coordinates.

    Only cartesian coordinates are supported for the moment, but you can use
    the parametric plots to plot in polar, spherical and cylindrical
    coordinates.

    The arguments for the constructor Plot must be subclasses of BaseSeries.

    Any global option can be specified as a keyword argument.

    The global options for a figure are:

    - title : str
    - xlabel : str
    - ylabel : str
    - legend : bool
    - xscale : {'linear', 'log'}
    - yscale : {'linear', 'log'}
    - axis : bool
    - axis_center : tuple of two floats or {'center', 'auto'}
    - xlim : tuple of two floats
    - ylim : tuple of two floats
    - aspect_ratio : tuple of two floats or {'auto'}
    - autoscale : bool
    - margin : float in [0, 1]

    The per data series options and aesthetics are:
    There are none in the base series. See below for options for subclasses.

    Some data series support additional aesthetics or options:

    ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
    Parametric3DLineSeries support the following:

    Aesthetics:

    - line_color : function which returns a float.

    options:

    - label : str
    - steps : bool
    - integers_only : bool

    SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:

    aesthetics:

    - surface_color : function which returns a float.
    """

    def __init__(self, *args, **kwargs):
        super(Plot, self).__init__()

        # Options for the graph as a whole.
        # The possible values for each option are described in the docstring of
        # Plot. They are based purely on convention, no checking is done.
        self.title = None
        self.xlabel = None
        self.ylabel = None
        self.aspect_ratio = 'auto'
        self.xlim = None
        self.ylim = None
        self.axis_center = 'auto'
        self.axis = True
        self.xscale = 'linear'
        self.yscale = 'linear'
        self.legend = False
        self.autoscale = True
        self.margin = 0

        # Contains the data objects to be plotted. The backend should be smart
        # enough to iterate over this list.
        self._series = []
        self._series.extend(args)

        # The backend type. On every show() a new backend instance is created
        # in self._backend which is tightly coupled to the Plot instance
        # (thanks to the parent attribute of the backend).
        self.backend = DefaultBackend

        # The keyword arguments should only contain options for the plot.
        for key, val in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, val)

    def show(self):
        # TODO move this to the backend (also for save)
        if hasattr(self, '_backend'):
            self._backend.close()
        self._backend = self.backend(self)
        self._backend.show()

    def save(self, path):
        if hasattr(self, '_backend'):
            self._backend.close()
        self._backend = self.backend(self)
        self._backend.save(path)

    def __str__(self):
        series_strs = [('[%d]: ' % i) + str(s)
                       for i, s in enumerate(self._series)]
        return 'Plot object containing:\n' + '\n'.join(series_strs)

    def __getitem__(self, index):
        return self._series[index]

    def __setitem__(self, index, *args):
        # Replace the data series at ``index``.  Only a single BaseSeries
        # argument is accepted; anything else is silently ignored.
        if len(args) == 1 and isinstance(args[0], BaseSeries):
            # BUG FIX: previously the 1-tuple ``args`` itself was stored,
            # so backends iterating over _series received a tuple instead
            # of a BaseSeries instance.  Store the series object itself.
            self._series[index] = args[0]

    def __delitem__(self, index):
        del self._series[index]

    @doctest_depends_on(modules=('numpy', 'matplotlib',))
    def append(self, arg):
        """Adds an element from a plot's series to an existing plot.

        Examples
        ========

        Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
        second plot's first series object to the first, use the
        ``append`` method, like so:

        >>> from sympy import symbols
        >>> from sympy.plotting import plot
        >>> x = symbols('x')
        >>> p1 = plot(x*x)
        >>> p2 = plot(x)
        >>> p1.append(p2[0])
        >>> p1
        Plot object containing:
        [0]: cartesian line: x**2 for x over (-10.0, 10.0)
        [1]: cartesian line: x for x over (-10.0, 10.0)

        See Also
        ========
        extend

        """
        if isinstance(arg, BaseSeries):
            self._series.append(arg)
        else:
            raise TypeError('Must specify element of plot to append.')

    @doctest_depends_on(modules=('numpy', 'matplotlib',))
    def extend(self, arg):
        """Adds all series from another plot.

        Examples
        ========

        Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
        second plot to the first, use the ``extend`` method, like so:

        >>> from sympy import symbols
        >>> from sympy.plotting import plot
        >>> x = symbols('x')
        >>> p1 = plot(x*x)
        >>> p2 = plot(x)
        >>> p1.extend(p2)
        >>> p1
        Plot object containing:
        [0]: cartesian line: x**2 for x over (-10.0, 10.0)
        [1]: cartesian line: x for x over (-10.0, 10.0)

        """
        if isinstance(arg, Plot):
            self._series.extend(arg._series)
        elif is_sequence(arg):
            self._series.extend(arg)
        else:
            raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
    """Base class for the data objects containing stuff to be plotted.

    The backend should check if it supports the data series that it's given.
    (eg TextBackend supports only LineOver1DRange).
    It's the backend responsibility to know how to use the class of
    data series that it's given.

    Data series classes are grouped (using class attributes like is_2Dline)
    according to the api they present, based only on convention; the backend
    is not obliged to use that api (eg. LineOver1DRange belongs to the
    is_2Dline group and presents get_points, but TextBackend does not use
    get_points).
    """

    # Flags are used instead of checking base classes because setting
    # multiple flags is simpler than multiple inheritance.

    # 2D line api expected by some backends:
    #   - get_points returning 1D np.arrays list_x, list_y
    #   - get_segments returning np.array (done in Line2DBaseSeries)
    #   - get_color_array returning 1D np.array (done in Line2DBaseSeries)
    #     with the colors calculated at the points from get_points
    is_2Dline = False

    # 3D line api expected by some backends:
    #   - get_points returning 1D np.arrays list_x, list_y, list_z
    #   - get_segments / get_color_array as for 2D lines
    is_3Dline = False

    # surface api expected by some backends:
    #   - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
    #   - get_points is an alias for get_meshes
    is_3Dsurface = False

    # contour api: same shape as the surface api above.
    is_contour = False

    # implicit api expected by some backends:
    #   - get_meshes returning mesh_x (1D array), mesh_y (1D array),
    #     mesh_z (2D np.arrays)
    #   - get_points is an alias for get_meshes
    # Kept distinct from is_contour as the backend colormap differs.
    is_implicit = False

    # Parametric series expose get_parameter_points returning one or two
    # np.arrays (1D or 2D), used when calculating aesthetics.
    is_parametric = False

    def __init__(self):
        super(BaseSeries, self).__init__()

    @property
    def is_3D(self):
        # True for any series living in three dimensions.
        return bool(self.is_3Dline or self.is_3Dsurface)

    @property
    def is_line(self):
        # True for both 2D and 3D line series.
        return bool(self.is_2Dline or self.is_3Dline)
### 2D lines
class Line2DBaseSeries(BaseSeries):
    """A base class for 2D lines.

    - adding the label, steps and only_integers options
    - making is_2Dline true
    - defining get_segments and get_color_array
    """

    is_2Dline = True

    _dim = 2

    def __init__(self):
        super(Line2DBaseSeries, self).__init__()
        self.label = None
        self.steps = False
        self.only_integers = False
        self.line_color = None

    def get_segments(self):
        """Return the line as a masked array of (start, end) point pairs."""
        np = import_module('numpy')
        points = self.get_points()
        if self.steps is True:
            # Interleave duplicated points so the line is drawn as a step
            # function instead of straight segments between samples.
            x = np.array((points[0], points[0])).T.flatten()[1:]
            y = np.array((points[1], points[1])).T.flatten()[:-1]
            points = (x, y)
        points = np.ma.array(points).T.reshape(-1, 1, self._dim)
        return np.ma.concatenate([points[:-1], points[1:]], axis=1)

    def get_color_array(self):
        """Evaluate the line_color aesthetic over the line.

        If line_color is callable, it is vectorized and evaluated at the
        centers of the line segments; its arity decides whether it is a
        function of the parameter(s) or of the coordinates.  Otherwise it
        is treated as a constant color value.
        """
        np = import_module('numpy')
        c = self.line_color
        if hasattr(c, '__call__'):
            f = np.vectorize(c)
            # BUG FIX: inspect.getargspec was deprecated and removed in
            # Python 3.11; prefer getfullargspec and fall back to the
            # module-level getargspec import only on Python 2.
            try:
                from inspect import getfullargspec
                arity = len(getfullargspec(c)[0])
            except ImportError:
                arity = len(getargspec(c)[0])
            if arity == 1 and self.is_parametric:
                x = self.get_parameter_points()
                return f(centers_of_segments(x))
            else:
                variables = list(map(centers_of_segments, self.get_points()))
                if arity == 1:
                    return f(variables[0])
                elif arity == 2:
                    return f(*variables[:2])
                else:  # only if the line is 3D (otherwise raises an error)
                    return f(*variables)
        else:
            return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
    """A 2D line given explicitly as two lists of coordinates."""

    def __init__(self, list_x, list_y):
        np = import_module('numpy')
        super(List2DSeries, self).__init__()
        self.list_x = np.array(list_x)
        self.list_y = np.array(list_y)
        self.label = 'list'

    def __str__(self):
        return 'list plot'

    def get_points(self):
        """Return the stored coordinate arrays unchanged."""
        return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
    """Representation for a line consisting of a SymPy expression over a range."""

    def __init__(self, expr, var_start_end, **kwargs):
        """``expr`` is the plotted expression; ``var_start_end`` is a
        3-tuple ``(variable, start, end)`` for the free variable."""
        super(LineOver1DRangeSeries, self).__init__()
        self.expr = sympify(expr)
        self.label = str(self.expr)
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        # nb_of_points only matters for uniform (non-adaptive) sampling.
        self.nb_of_points = kwargs.get('nb_of_points', 300)
        self.adaptive = kwargs.get('adaptive', True)
        self.depth = kwargs.get('depth', 12)
        self.line_color = kwargs.get('line_color', None)

    def __str__(self):
        return 'cartesian line: %s for %s over %s' % (
            str(self.expr), str(self.var), str((self.start, self.end)))

    def get_segments(self):
        """
        Adaptively gets segments for plotting.

        The adaptive sampling is done by recursively checking if three
        points are almost collinear. If they are not collinear, then more
        points are added between those points.

        References
        ==========
        [1] Adaptive polygonal approximation of parametric curves,
        Luiz Henrique de Figueiredo.
        """
        if self.only_integers or not self.adaptive:
            return super(LineOver1DRangeSeries, self).get_segments()
        else:
            f = lambdify([self.var], self.expr)
            list_segments = []

            def sample(p, q, depth):
                """ Samples recursively if three points are almost collinear.
                For depth < 6, points are added irrespective of whether they
                satisfy the collinearity condition or not. The maximum depth
                allowed is 12.
                """
                np = import_module('numpy')
                # Split at a random point near the midpoint to avoid aliasing.
                random = 0.45 + np.random.rand() * 0.1
                xnew = p[0] + random * (q[0] - p[0])
                ynew = f(xnew)
                new_point = np.array([xnew, ynew])
                # Maximum depth reached: emit the segment as-is.
                if depth > self.depth:
                    list_segments.append([p, q])
                # Sample irrespective of whether the line is flat until a
                # depth of 6. We are not using linspace to avoid aliasing.
                elif depth < 6:
                    sample(p, new_point, depth + 1)
                    sample(new_point, q, depth + 1)
                # Sample ten points if complex values are encountered
                # at both ends (a y-value of None signals a non-real value).
                # If there is a real value in between, then sample those
                # points further.
                elif p[1] is None and q[1] is None:
                    xarray = np.linspace(p[0], q[0], 10)
                    yarray = list(map(f, xarray))
                    if any(y is not None for y in yarray):
                        for i in range(len(yarray) - 1):
                            if yarray[i] is not None or yarray[i + 1] is not None:
                                sample([xarray[i], yarray[i]],
                                    [xarray[i + 1], yarray[i + 1]], depth + 1)
                # Sample further if one of the end points is None (i.e. a
                # complex value) or the three points are not almost collinear.
                elif (p[1] is None or q[1] is None or new_point[1] is None
                        or not flat(p, new_point, q)):
                    sample(p, new_point, depth + 1)
                    sample(new_point, q, depth + 1)
                else:
                    list_segments.append([p, q])

            f_start = f(self.start)
            f_end = f(self.end)
            sample([self.start, f_start], [self.end, f_end], 0)
            return list_segments

    def get_points(self):
        """Uniformly sample the expression over the range.

        With ``only_integers`` set, the sample points are exactly the
        integers of the (truncated) range.
        """
        np = import_module('numpy')
        if self.only_integers is True:
            list_x = np.linspace(int(self.start), int(self.end),
                num=int(self.end) - int(self.start) + 1)
        else:
            list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
        f = vectorized_lambdify([self.var], self.expr)
        list_y = f(list_x)
        return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
    """Representation for a line consisting of two parametric sympy expressions
    over a range."""
    is_parametric = True

    def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
        super(Parametric2DLineSeries, self).__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.nb_of_points = kwargs.get('nb_of_points', 300)
        self.adaptive = kwargs.get('adaptive', True)
        self.depth = kwargs.get('depth', 12)
        self.line_color = kwargs.get('line_color', None)

    def __str__(self):
        return 'parametric cartesian line: (%s, %s) for %s over %s' % (
            str(self.expr_x), str(self.expr_y), str(self.var),
            str((self.start, self.end)))

    def get_parameter_points(self):
        """Uniform 1D grid of parameter values."""
        np = import_module('numpy')
        return np.linspace(self.start, self.end, num=self.nb_of_points)

    def get_points(self):
        """Evaluate both coordinate expressions on the parameter grid."""
        param = self.get_parameter_points()
        fx = vectorized_lambdify([self.var], self.expr_x)
        fy = vectorized_lambdify([self.var], self.expr_y)
        list_x = fx(param)
        list_y = fy(param)
        return (list_x, list_y)

    def get_segments(self):
        """
        Adaptively gets segments for plotting.

        The adaptive sampling is done by recursively checking if three
        points are almost collinear. If they are not collinear, then more
        points are added between those points.

        References
        ==========
        [1] Adaptive polygonal approximation of parametric curves,
        Luiz Henrique de Figueiredo.
        """
        if not self.adaptive:
            return super(Parametric2DLineSeries, self).get_segments()
        f_x = lambdify([self.var], self.expr_x)
        f_y = lambdify([self.var], self.expr_y)
        list_segments = []

        def _is_complex(point):
            # A coordinate of None signals a non-real (complex) value.
            return point[0] is None or point[1] is None

        def sample(param_p, param_q, p, q, depth):
            """ Samples recursively if three points are almost collinear.
            For depth < 6, points are added irrespective of whether they
            satisfy the collinearity condition or not. The maximum depth
            allowed is 12.
            """
            # Split at a random point near the midpoint to avoid aliasing.
            np = import_module('numpy')
            random = 0.45 + np.random.rand() * 0.1
            param_new = param_p + random * (param_q - param_p)
            xnew = f_x(param_new)
            ynew = f_y(param_new)
            new_point = np.array([xnew, ynew])
            # Maximum depth reached: emit the segment as-is.
            if depth > self.depth:
                list_segments.append([p, q])
            # Sample irrespective of whether the line is flat until a depth
            # of 6. We are not using linspace to avoid aliasing.
            elif depth < 6:
                sample(param_p, param_new, p, new_point, depth + 1)
                sample(param_new, param_q, new_point, q, depth + 1)
            # Sample ten points if complex values are encountered
            # at both ends. If there is a real value in between, then
            # sample those points further.
            # BUGFIX: this test used to read
            # ``(p[0] is None and q[1] is None) or
            #   (p[1] is None and q[1] is None)``,
            # mixing up the two endpoints' coordinates.
            elif _is_complex(p) and _is_complex(q):
                param_array = np.linspace(param_p, param_q, 10)
                x_array = list(map(f_x, param_array))
                y_array = list(map(f_y, param_array))
                if any(x is not None and y is not None
                        for x, y in zip(x_array, y_array)):
                    for i in range(len(y_array) - 1):
                        if ((x_array[i] is not None and y_array[i] is not None) or
                                (x_array[i + 1] is not None and y_array[i + 1] is not None)):
                            point_a = [x_array[i], y_array[i]]
                            point_b = [x_array[i + 1], y_array[i + 1]]
                            # BUGFIX: the recursive call used to pass
                            # ``param_array[i]`` twice, collapsing the
                            # subinterval to a single point.
                            sample(param_array[i], param_array[i + 1],
                                   point_a, point_b, depth + 1)
            # Sample further if one of the end points is None (i.e. a complex
            # value) or the three points are not almost collinear.
            elif (_is_complex(p) or _is_complex(q)
                    or not flat(p, new_point, q)):
                sample(param_p, param_new, p, new_point, depth + 1)
                sample(param_new, param_q, new_point, q, depth + 1)
            else:
                list_segments.append([p, q])

        f_start_x = f_x(self.start)
        f_start_y = f_y(self.start)
        start = [f_start_x, f_start_y]
        f_end_x = f_x(self.end)
        f_end_y = f_y(self.end)
        end = [f_end_x, f_end_y]
        sample(self.start, self.end, start, end, 0)
        return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
    """Base class for 3D lines.

    Nearly everything is inherited from Line2DBaseSeries; only the
    dimensionality flags differ.
    """
    is_2Dline = False
    is_3Dline = True
    _dim = 3

    def __init__(self):
        super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
    """Representation for a 3D line consisting of three parametric sympy
    expressions and a range."""

    def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
        super(Parametric3DLineSeries, self).__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.expr_z = sympify(expr_z)
        # BUGFIX: the label used to be "(%s, %s)" with only expr_x and
        # expr_y even though this is a 3D line with three expressions.
        self.label = "(%s, %s, %s)" % (str(self.expr_x), str(self.expr_y),
                                       str(self.expr_z))
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.nb_of_points = kwargs.get('nb_of_points', 300)
        self.line_color = kwargs.get('line_color', None)

    def __str__(self):
        return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
            str(self.expr_x), str(self.expr_y), str(self.expr_z),
            str(self.var), str((self.start, self.end)))

    def get_parameter_points(self):
        """Uniform 1D grid of parameter values."""
        np = import_module('numpy')
        return np.linspace(self.start, self.end, num=self.nb_of_points)

    def get_points(self):
        """Evaluate the three coordinate expressions on the parameter grid."""
        param = self.get_parameter_points()
        fx = vectorized_lambdify([self.var], self.expr_x)
        fy = vectorized_lambdify([self.var], self.expr_y)
        fz = vectorized_lambdify([self.var], self.expr_z)
        list_x = fx(param)
        list_y = fy(param)
        list_z = fz(param)
        return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
    """A base class for 3D surfaces."""
    is_3Dsurface = True

    def __init__(self):
        super(SurfaceBaseSeries, self).__init__()
        # None, a constant, or a callable evaluated at the mesh face
        # centers (see get_color_array).
        self.surface_color = None

    def get_color_array(self):
        """Evaluate ``surface_color`` at the face centers of the mesh.

        A callable color of arity 1 or 2 is fed the parameter meshes when
        the surface is parametric, otherwise the coordinate meshes.
        """
        np = import_module('numpy')
        c = self.surface_color
        if isinstance(c, Callable):
            f = np.vectorize(c)
            arity = len(getargspec(c)[0])
            if self.is_parametric:
                variables = list(map(centers_of_faces, self.get_parameter_meshes()))
                if arity == 1:
                    return f(variables[0])
                elif arity == 2:
                    return f(*variables)
            # Falls through here for arity 3, or for non-parametric surfaces.
            variables = list(map(centers_of_faces, self.get_meshes()))
            if arity == 1:
                return f(variables[0])
            elif arity == 2:
                return f(*variables[:2])
            else:
                return f(*variables)
        else:
            # NOTE(review): surface subclasses define nb_of_points_x/_y
            # (or _u/_v) but no ``nb_of_points`` attribute, so a constant
            # surface_color would raise AttributeError here — confirm the
            # intended array size.
            return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
    """A 3D surface ``z = f(x, y)`` over a rectangular 2D range."""

    def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
        super(SurfaceOver2DRangeSeries, self).__init__()
        self.expr = sympify(expr)
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = float(var_start_end_x[1])
        self.end_x = float(var_start_end_x[2])
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = float(var_start_end_y[1])
        self.end_y = float(var_start_end_y[2])
        self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
        self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
        self.surface_color = kwargs.get('surface_color', None)

    def __str__(self):
        return ('cartesian surface: %s for'
                ' %s over %s and %s over %s') % (
                    str(self.expr),
                    str(self.var_x),
                    str((self.start_x, self.end_x)),
                    str(self.var_y),
                    str((self.start_y, self.end_y)))

    def get_meshes(self):
        """Return ``(mesh_x, mesh_y, f(mesh_x, mesh_y))`` as 2D arrays."""
        np = import_module('numpy')
        xs = np.linspace(self.start_x, self.end_x, num=self.nb_of_points_x)
        ys = np.linspace(self.start_y, self.end_y, num=self.nb_of_points_y)
        mesh_x, mesh_y = np.meshgrid(xs, ys)
        f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
        return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
    """A 3D surface given parametrically by three sympy expressions of two
    parameters over rectangular parameter ranges."""
    is_parametric = True

    def __init__(self, expr_x, expr_y, expr_z, var_start_end_u,
                 var_start_end_v, **kwargs):
        super(ParametricSurfaceSeries, self).__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.expr_z = sympify(expr_z)
        self.var_u = sympify(var_start_end_u[0])
        self.start_u = float(var_start_end_u[1])
        self.end_u = float(var_start_end_u[2])
        self.var_v = sympify(var_start_end_v[0])
        self.start_v = float(var_start_end_v[1])
        self.end_v = float(var_start_end_v[2])
        self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
        self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
        self.surface_color = kwargs.get('surface_color', None)

    def __str__(self):
        return ('parametric cartesian surface: (%s, %s, %s) for'
                ' %s over %s and %s over %s') % (
                    str(self.expr_x),
                    str(self.expr_y),
                    str(self.expr_z),
                    str(self.var_u),
                    str((self.start_u, self.end_u)),
                    str(self.var_v),
                    str((self.start_v, self.end_v)))

    def get_parameter_meshes(self):
        """2D meshes of the (u, v) parameter values."""
        np = import_module('numpy')
        us = np.linspace(self.start_u, self.end_u, num=self.nb_of_points_u)
        vs = np.linspace(self.start_v, self.end_v, num=self.nb_of_points_v)
        return np.meshgrid(us, vs)

    def get_meshes(self):
        """Evaluate the three coordinate expressions on the parameter mesh."""
        mesh_u, mesh_v = self.get_parameter_meshes()
        fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
        fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
        fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
        return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
    """Representation for a contour plot."""
    # The code mostly mirrors SurfaceOver2DRangeSeries.
    # XXX: Presently not used in any of those functions.
    # XXX: Add contour plot and use this series.
    is_contour = True

    def __init__(self, expr, var_start_end_x, var_start_end_y):
        super(ContourSeries, self).__init__()
        self.nb_of_points_x = 50
        self.nb_of_points_y = 50
        self.expr = sympify(expr)
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = float(var_start_end_x[1])
        self.end_x = float(var_start_end_x[2])
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = float(var_start_end_y[1])
        self.end_y = float(var_start_end_y[2])
        # Backends ask for get_points; for a contour that is the meshes.
        self.get_points = self.get_meshes

    def __str__(self):
        return ('contour: %s for '
                '%s over %s and %s over %s') % (
                    str(self.expr),
                    str(self.var_x),
                    str((self.start_x, self.end_x)),
                    str(self.var_y),
                    str((self.start_y, self.end_y)))

    def get_meshes(self):
        """Return ``(mesh_x, mesh_y, f(mesh_x, mesh_y))`` as 2D arrays."""
        np = import_module('numpy')
        xs = np.linspace(self.start_x, self.end_x, num=self.nb_of_points_x)
        ys = np.linspace(self.start_y, self.end_y, num=self.nb_of_points_y)
        mesh_x, mesh_y = np.meshgrid(xs, ys)
        f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
        return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
    """Base class for plotting backends; keeps a reference to the owning
    ``Plot`` instance (``parent``) whose ``_series`` it renders."""
    def __init__(self, parent):
        super(BaseBackend, self).__init__()
        self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matploblib
class MatplotlibBackend(BaseBackend):
    """Render a ``Plot``'s series with matplotlib.

    2D and 3D series can not be mixed in a single figure; all-2D plots get
    centered spines, all-3D plots get an Axes3D subplot.
    """
    def __init__(self, parent):
        super(MatplotlibBackend, self).__init__(parent)
        are_3D = [s.is_3D for s in self.parent._series]
        self.matplotlib = import_module('matplotlib',
            __import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
            min_module_version='1.1.0', catch=(RuntimeError,))
        self.plt = self.matplotlib.pyplot
        self.cm = self.matplotlib.cm
        self.LineCollection = self.matplotlib.collections.LineCollection
        if any(are_3D) and not all(are_3D):
            raise ValueError('The matplotlib backend can not mix 2D and 3D.')
        elif not any(are_3D):
            self.fig = self.plt.figure()
            self.ax = self.fig.add_subplot(111)
            # Move the spines to the origin and hide the top/right ones so
            # the plot looks like a classic cartesian axis cross.
            self.ax.spines['left'].set_position('zero')
            self.ax.spines['right'].set_color('none')
            self.ax.spines['bottom'].set_position('zero')
            self.ax.spines['top'].set_color('none')
            # NOTE(review): Spine.set_smart_bounds was removed in
            # matplotlib 3.4 — confirm the supported matplotlib range.
            self.ax.spines['left'].set_smart_bounds(True)
            self.ax.spines['bottom'].set_smart_bounds(False)
            self.ax.xaxis.set_ticks_position('bottom')
            self.ax.yaxis.set_ticks_position('left')
        elif all(are_3D):
            ## mpl_toolkits.mplot3d is necessary for
            ## projection='3d'
            mpl_toolkits = import_module('mpl_toolkits',
                __import__kwargs={'fromlist': ['mplot3d']})
            self.fig = self.plt.figure()
            self.ax = self.fig.add_subplot(111, projection='3d')

    def process_series(self):
        """Translate every series of the parent Plot into matplotlib
        artists, then apply the Plot-level (global) options."""
        parent = self.parent
        for s in self.parent._series:
            # Create the collections
            if s.is_2Dline:
                collection = self.LineCollection(s.get_segments())
                self.ax.add_collection(collection)
            elif s.is_contour:
                self.ax.contour(*s.get_meshes())
            elif s.is_3Dline:
                # TODO too complicated, I blame matplotlib
                mpl_toolkits = import_module('mpl_toolkits',
                    __import__kwargs={'fromlist': ['mplot3d']})
                art3d = mpl_toolkits.mplot3d.art3d
                collection = art3d.Line3DCollection(s.get_segments())
                self.ax.add_collection(collection)
                x, y, z = s.get_points()
                self.ax.set_xlim((min(x), max(x)))
                self.ax.set_ylim((min(y), max(y)))
                self.ax.set_zlim((min(z), max(z)))
            elif s.is_3Dsurface:
                x, y, z = s.get_meshes()
                collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
                                                  rstride=1, cstride=1,
                                                  linewidth=0.1)
            elif s.is_implicit:
                # Smart bounds have to be set to False for implicit plots.
                self.ax.spines['left'].set_smart_bounds(False)
                self.ax.spines['bottom'].set_smart_bounds(False)
                points = s.get_raster()
                if len(points) == 2:
                    # interval math plotting
                    x, y = _matplotlib_list(points[0])
                    self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
                else:
                    # use contourf or contour depending on whether it is
                    # an inequality or equality.
                    # XXX: ``contour`` plots multiple lines. Should be fixed.
                    ListedColormap = self.matplotlib.colors.ListedColormap
                    colormap = ListedColormap(["white", s.line_color])
                    xarray, yarray, zarray, plot_type = points
                    if plot_type == 'contour':
                        self.ax.contour(xarray, yarray, zarray,
                                contours=(0, 0), fill=False, cmap=colormap)
                    else:
                        self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
            else:
                raise ValueError('The matplotlib backend supports only '
                                 'is_2Dline, is_3Dline, is_3Dsurface and '
                                 'is_contour objects.')
            # Customise the collections with the corresponding per-series
            # options.
            if hasattr(s, 'label'):
                collection.set_label(s.label)
            if s.is_line and s.line_color:
                # Numeric or callable colors become a per-segment color
                # array; anything else is treated as a plain color spec.
                if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
                    color_array = s.get_color_array()
                    collection.set_array(color_array)
                else:
                    collection.set_color(s.line_color)
            if s.is_3Dsurface and s.surface_color:
                if self.matplotlib.__version__ < "1.2.0":  # TODO in the distant future remove this check
                    warnings.warn('The version of matplotlib is too old to use surface coloring.')
                elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
                    color_array = s.get_color_array()
                    color_array = color_array.reshape(color_array.size)
                    collection.set_array(color_array)
                else:
                    collection.set_color(s.surface_color)
        # Set global options.
        # TODO The 3D stuff
        # XXX The order of those is important.
        mpl_toolkits = import_module('mpl_toolkits',
            __import__kwargs={'fromlist': ['mplot3d']})
        Axes3D = mpl_toolkits.mplot3d.Axes3D
        if parent.xscale and not isinstance(self.ax, Axes3D):
            self.ax.set_xscale(parent.xscale)
        if parent.yscale and not isinstance(self.ax, Axes3D):
            self.ax.set_yscale(parent.yscale)
        if parent.xlim:
            self.ax.set_xlim(parent.xlim)
        else:
            # No explicit xlim: for plain cartesian lines use the union of
            # the series' ranges.
            if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
                starts = [s.start for s in parent._series]
                ends = [s.end for s in parent._series]
                self.ax.set_xlim(min(starts), max(ends))
        if parent.ylim:
            self.ax.set_ylim(parent.ylim)
        if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0':  # XXX in the distant future remove this check
            self.ax.set_autoscale_on(parent.autoscale)
        if parent.axis_center:
            val = parent.axis_center
            if isinstance(self.ax, Axes3D):
                pass
            elif val == 'center':
                self.ax.spines['left'].set_position('center')
                self.ax.spines['bottom'].set_position('center')
            elif val == 'auto':
                # Put a spine on the data origin only when zero lies inside
                # the corresponding axis limits.
                xl, xh = self.ax.get_xlim()
                yl, yh = self.ax.get_ylim()
                pos_left = ('data', 0) if xl*xh <= 0 else 'center'
                pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
                self.ax.spines['left'].set_position(pos_left)
                self.ax.spines['bottom'].set_position(pos_bottom)
            else:
                self.ax.spines['left'].set_position(('data', val[0]))
                self.ax.spines['bottom'].set_position(('data', val[1]))
        if not parent.axis:
            self.ax.set_axis_off()
        if parent.legend:
            if self.ax.legend():
                self.ax.legend_.set_visible(parent.legend)
        if parent.margin:
            self.ax.set_xmargin(parent.margin)
            self.ax.set_ymargin(parent.margin)
        if parent.title:
            self.ax.set_title(parent.title)
        if parent.xlabel:
            self.ax.set_xlabel(parent.xlabel, position=(1, 0))
        if parent.ylabel:
            self.ax.set_ylabel(parent.ylabel, position=(0, 1))

    def show(self):
        self.process_series()
        # TODO after fixing https://github.com/ipython/ipython/issues/1255
        # you can uncomment the next line and remove the pyplot.show() call
        # self.fig.show()
        if _show:
            self.plt.show()

    def save(self, path):
        self.process_series()
        self.fig.savefig(path)

    def close(self):
        self.plt.close(self.fig)
class TextBackend(BaseBackend):
    """Fallback backend that renders a single cartesian line as ASCII art."""

    def __init__(self, parent):
        super(TextBackend, self).__init__(parent)

    def show(self):
        series = self.parent._series
        if len(series) != 1:
            raise ValueError(
                'The TextBackend supports only one graph per Plot.')
        if not isinstance(series[0], LineOver1DRangeSeries):
            raise ValueError(
                'The TextBackend supports only expressions over a 1D range')
        ser = series[0]
        textplot(ser.expr, ser.start, ser.end)

    def close(self):
        # Nothing to release for text output.
        pass
class DefaultBackend(BaseBackend):
    """Factory backend: picks matplotlib when importable, else text."""

    def __new__(cls, parent):
        matplotlib = import_module('matplotlib', min_module_version='1.1.0',
                                   catch=(RuntimeError,))
        return MatplotlibBackend(parent) if matplotlib else TextBackend(parent)
# Registry mapping user-facing backend names to backend classes.
plot_backends = {
    'matplotlib': MatplotlibBackend,
    'text': TextBackend,
    'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
    """Return the midpoints of consecutive entries of *array*."""
    np = import_module('numpy')
    head, tail = array[:-1], array[1:]
    return np.average(np.vstack((head, tail)), 0)
def centers_of_faces(array):
    """Return the center of every mesh face, i.e. the average of its four
    corner values.

    BUGFIX: the fourth stacked corner used to repeat ``array[:-1, :-1]``
    instead of ``array[1:, 1:]``, biasing every face center toward the
    lower-left corner of the face.
    """
    np = import_module('numpy')
    return np.average(np.dstack((array[:-1, :-1],
                                 array[1:, :-1],
                                 array[:-1, 1:],
                                 array[1:, 1:],
                                 )), 2)
def flat(x, y, z, eps=1e-3):
    """Checks whether three points are almost collinear.

    Returns True when the angle at *y* between the vectors y->x and y->z
    is within ``eps`` of a straight angle (cosine close to -1).
    """
    np = import_module('numpy')
    # Workaround plotting piecewise (#8577):
    # workaround for `lambdify` in `.experimental_lambdify` fails
    # to return numerical values in some cases. Lower-level fix
    # in `lambdify` is possible.
    # BUGFIX: use the builtin ``float`` instead of ``np.float``, which was
    # deprecated in NumPy 1.20 and removed in NumPy 1.24.
    vector_a = (x - y).astype(float)
    vector_b = (z - y).astype(float)
    dot_product = np.dot(vector_a, vector_b)
    vector_a_norm = np.linalg.norm(vector_a)
    vector_b_norm = np.linalg.norm(vector_b)
    cos_theta = dot_product / (vector_a_norm * vector_b_norm)
    return abs(cos_theta + 1) < eps
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
    """
    Plots a function of a single variable and returns an instance of
    the ``Plot`` class (also, see the description of the
    ``show`` keyword argument below).

    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the plot. The adaptive algorithm uses a random point near
    the midpoint of two points that has to be further sampled. Hence the same
    plots can appear slightly different.

    Usage
    =====

    Single Plot

    ``plot(expr, range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with same range.

    ``plot(expr1, expr2, ..., range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with different ranges.

    ``plot((expr1, range), (expr2, range), ..., **kwargs)``

    Range has to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr`` : Expression representing the function of single variable

    ``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.

    Keyword Arguments
    =================

    Arguments for ``plot`` function:

    ``show``: Boolean. The default value is set to ``True``. Set show to
    ``False`` and the function will not display the plot. The returned
    instance of the ``Plot`` class can then be used to save or display
    the plot by calling the ``save()`` and ``show()`` methods
    respectively.

    Arguments for ``LineOver1DRangeSeries`` class:

    ``adaptive``: Boolean. The default value is set to True. Set adaptive to
    False and specify ``nb_of_points`` if uniform sampling is required.

    ``depth``: int. Recursion depth of the adaptive algorithm. A depth of
    value ``n`` samples a maximum of `2^{n}` points.

    ``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
    function is uniformly sampled at ``nb_of_points`` number of points.

    Aesthetics options:

    ``line_color``: float. Specifies the color for the plot.
    See ``Plot`` to see how to set color for the plots.

    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the ``Plot`` object returned and set it.

    Arguments for ``Plot`` class:

    ``title`` : str. Title of the plot. It is set to the latex representation of
    the expression, if the plot has only one expression.

    ``xlabel`` : str. Label for the x-axis.

    ``ylabel`` : str. Label for the y-axis.

    ``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.

    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.

    ``axis_center``: tuple of two floats denoting the coordinates of the center
    or {'center', 'auto'}

    ``xlim`` : tuple of two floats, denoting the x-axis limits.

    ``ylim`` : tuple of two floats, denoting the y-axis limits.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.plotting import plot
    >>> x = symbols('x')

    Single Plot

    >>> plot(x**2, (x, -5, 5))
    Plot object containing:
    [0]: cartesian line: x**2 for x over (-5.0, 5.0)

    Multiple plots with single range.

    >>> plot(x, x**2, x**3, (x, -5, 5))
    Plot object containing:
    [0]: cartesian line: x for x over (-5.0, 5.0)
    [1]: cartesian line: x**2 for x over (-5.0, 5.0)
    [2]: cartesian line: x**3 for x over (-5.0, 5.0)

    Multiple plots with different ranges.

    >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
    Plot object containing:
    [0]: cartesian line: x**2 for x over (-6.0, 6.0)
    [1]: cartesian line: x for x over (-5.0, 5.0)

    No adaptive sampling.

    >>> plot(x**2, adaptive=False, nb_of_points=400)
    Plot object containing:
    [0]: cartesian line: x**2 for x over (-10.0, 10.0)

    See Also
    ========
    Plot, LineOver1DRangeSeries.
    """
    args = list(map(sympify, args))
    # All univariate expressions must share a single free symbol.
    free = set()
    for a in args:
        if isinstance(a, Expr):
            free |= a.free_symbols
            if len(free) > 1:
                raise ValueError(
                    'The same variable should be used in all '
                    'univariate expressions being plotted.')
    x = free.pop() if free else Symbol('x')
    kwargs.setdefault('xlabel', x.name)
    kwargs.setdefault('ylabel', 'f(%s)' % x.name)
    show = kwargs.pop('show', True)
    series = []
    # Normalize the argument forms into (expr, range) tuples.
    plot_expr = check_arguments(args, 1, 1)
    series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
    """
    Plots a 2D parametric plot.

    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the plot. The adaptive algorithm uses a random point near
    the midpoint of two points that has to be further sampled. Hence the same
    plots can appear slightly different.

    Usage
    =====

    Single plot.

    ``plot_parametric(expr_x, expr_y, range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with same range.

    ``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots with different ranges.

    ``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``

    Range has to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr_x`` : Expression representing the function along x.

    ``expr_y`` : Expression representing the function along y.

    ``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
    variable.

    Keyword Arguments
    =================

    Arguments for ``Parametric2DLineSeries`` class:

    ``adaptive``: Boolean. The default value is set to True. Set adaptive to
    False and specify ``nb_of_points`` if uniform sampling is required.

    ``depth``: int. Recursion depth of the adaptive algorithm. A depth of
    value ``n`` samples a maximum of `2^{n}` points.

    ``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
    function is uniformly sampled at ``nb_of_points`` number of points.

    Aesthetics
    ----------

    ``line_color``: function which returns a float. Specifies the color for the
    plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same Series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class:

    ``xlabel`` : str. Label for the x-axis.

    ``ylabel`` : str. Label for the y-axis.

    ``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.

    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.

    ``axis_center``: tuple of two floats denoting the coordinates of the center
    or {'center', 'auto'}

    ``xlim`` : tuple of two floats, denoting the x-axis limits.

    ``ylim`` : tuple of two floats, denoting the y-axis limits.

    Examples
    ========

    >>> from sympy import symbols, cos, sin
    >>> from sympy.plotting import plot_parametric
    >>> u = symbols('u')

    Single Parametric plot

    >>> plot_parametric(cos(u), sin(u), (u, -5, 5))
    Plot object containing:
    [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)

    Multiple parametric plot with single range.

    >>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
    Plot object containing:
    [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
    [1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)

    Multiple parametric plots.

    >>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
    ...     (cos(u), u, (u, -5, 5)))
    Plot object containing:
    [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
    [1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)

    See Also
    ========
    Plot, Parametric2DLineSeries
    """
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    series = []
    # Normalize the argument forms into (expr_x, expr_y, range) tuples.
    plot_expr = check_arguments(args, 2, 1)
    series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
    """
    Plots a 3D parametric line plot.

    Usage
    =====

    Single plot:

    ``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``

    If the range is not specified, then a default range of (-10, 10) is used.

    Multiple plots.

    ``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``

    Ranges have to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr_x`` : Expression representing the function along x.

    ``expr_y`` : Expression representing the function along y.

    ``expr_z`` : Expression representing the function along z.

    ``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
    variable.

    Keyword Arguments
    =================

    Arguments for ``Parametric3DLineSeries`` class.

    ``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
    number of points.

    Aesthetics:

    ``line_color``: function which returns a float. Specifies the color for the
    plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class.

    ``title`` : str. Title of the plot.

    Examples
    ========

    >>> from sympy import symbols, cos, sin
    >>> from sympy.plotting import plot3d_parametric_line
    >>> u = symbols('u')

    Single plot.

    >>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
    Plot object containing:
    [0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)

    Multiple plots.

    >>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
    ... (sin(u), u**2, u, (u, -5, 5)))
    Plot object containing:
    [0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
    [1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)

    See Also
    ========

    Plot, Parametric3DLineSeries

    """
    # Convert plain Python numbers/strings into SymPy objects.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    # Normalize args into a list of (expr_x, expr_y, expr_z, range) tuples,
    # filling in default ranges where missing.
    plot_expr = check_arguments(args, 3, 1)
    series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
    """
    Plots a 3D surface plot.

    Usage
    =====

    Single plot

    ``plot3d(expr, range_x, range_y, **kwargs)``

    If the ranges are not specified, then a default range of (-10, 10) is used.

    Multiple plot with the same range.

    ``plot3d(expr1, expr2, range_x, range_y, **kwargs)``

    If the ranges are not specified, then a default range of (-10, 10) is used.

    Multiple plots with different ranges.

    ``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``

    Ranges have to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr`` : Expression representing the function along x.

    ``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
    variable.

    ``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
    variable.

    Keyword Arguments
    =================

    Arguments for ``SurfaceOver2DRangeSeries`` class:

    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` of points.

    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` of points.

    Aesthetics:

    ``surface_color``: Function which returns a float. Specifies the color for
    the surface of the plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class:

    ``title`` : str. Title of the plot.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.plotting import plot3d
    >>> x, y = symbols('x y')

    Single plot

    >>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
    Plot object containing:
    [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)

    Multiple plots with same range

    >>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
    Plot object containing:
    [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
    [1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)

    Multiple plots with different ranges.

    >>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
    ... (x*y, (x, -3, 3), (y, -3, 3)))
    Plot object containing:
    [0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
    [1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)

    See Also
    ========

    Plot, SurfaceOver2DRangeSeries

    """
    # Convert plain Python numbers/strings into SymPy objects.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    # Normalize args into a list of (expr, range_x, range_y) tuples.
    plot_expr = check_arguments(args, 1, 2)
    series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
    """
    Plots a 3D parametric surface plot.

    Usage
    =====

    Single plot.

    ``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``

    If the ranges is not specified, then a default range of (-10, 10) is used.

    Multiple plots.

    ``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``

    Ranges have to be specified for every expression.

    Default range may change in the future if a more advanced default range
    detection algorithm is implemented.

    Arguments
    =========

    ``expr_x``: Expression representing the function along ``x``.

    ``expr_y``: Expression representing the function along ``y``.

    ``expr_z``: Expression representing the function along ``z``.

    ``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
    variable.

    ``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
    variable.

    Keyword Arguments
    =================

    Arguments for ``ParametricSurfaceSeries`` class:

    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` of points

    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` of points

    Aesthetics:

    ``surface_color``: Function which returns a float. Specifies the color for
    the surface of the plot. See ``sympy.plotting.Plot`` for more details.

    If there are multiple plots, then the same series arguments are applied for
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.

    Arguments for ``Plot`` class:

    ``title`` : str. Title of the plot.

    Examples
    ========

    >>> from sympy import symbols, cos, sin
    >>> from sympy.plotting import plot3d_parametric_surface
    >>> u, v = symbols('u v')

    Single plot.

    >>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
    ... (u, -5, 5), (v, -5, 5))
    Plot object containing:
    [0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)

    See Also
    ========

    Plot, ParametricSurfaceSeries

    """
    # Convert plain Python numbers/strings into SymPy objects.
    args = list(map(sympify, args))
    show = kwargs.pop('show', True)
    # Normalize args into (expr_x, expr_y, expr_z, range_u, range_v) tuples.
    plot_expr = check_arguments(args, 3, 2)
    series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
    plots = Plot(*series, **kwargs)
    if show:
        plots.show()
    return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
    """
    Checks the arguments and converts into tuples of the
    form (exprs, ranges).

    ``expr_len`` is the number of expressions forming one plot entry (e.g.
    2 for a 2D parametric line) and ``nb_of_free_symbols`` is the number of
    ranges each entry needs (1 for line plots, 2 for surfaces).

    Examples
    ========

    >>> from sympy import plot, cos, sin, symbols
    >>> from sympy.plotting.plot import check_arguments
    >>> x = symbols('x')
    >>> check_arguments([cos(x), sin(x)], 2, 1)
    [(cos(x), sin(x), (x, -10, 10))]

    >>> check_arguments([x, x**2], 1, 1)
    [(x, (x, -10, 10)), (x**2, (x, -10, 10))]
    """
    if expr_len > 1 and isinstance(args[0], Expr):
        # Multiple expressions same range.
        # The arguments are tuples when the expression length is
        # greater than 1.
        if len(args) < expr_len:
            raise ValueError("len(args) should not be less than expr_len")
        # Find where the expressions end and the (Tuple) ranges begin.
        for i in range(len(args)):
            if isinstance(args[i], Tuple):
                break
        else:
            # No range tuple present at all.
            i = len(args) + 1

        exprs = Tuple(*args[:i])
        free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
        if len(args) == expr_len + nb_of_free_symbols:
            # Ranges given
            plots = [exprs + Tuple(*args[expr_len:])]
        else:
            default_range = Tuple(-10, 10)
            ranges = []
            for symbol in free_symbols:
                ranges.append(Tuple(symbol) + default_range)
            # Pad with dummy variables when there are fewer free symbols
            # than required ranges (e.g. constant expressions).
            for i in range(len(free_symbols) - nb_of_free_symbols):
                ranges.append(Tuple(Dummy()) + default_range)
            plots = [exprs + Tuple(*ranges)]
        return plots

    if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
                                     len(args[0]) == expr_len and
                                     expr_len != 3):
        # Cannot handle expressions with number of expression = 3. It is
        # not possible to differentiate between expressions and ranges.
        # Series of plots with same range
        for i in range(len(args)):
            if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
                break
            if not isinstance(args[i], Tuple):
                # Wrap a bare expression so every entry is a Tuple.
                args[i] = Tuple(args[i])
        else:
            i = len(args) + 1

        exprs = args[:i]
        assert all(isinstance(e, Expr) for expr in exprs for e in expr)
        free_symbols = list(set().union(*[e.free_symbols for expr in exprs
                                          for e in expr]))

        if len(free_symbols) > nb_of_free_symbols:
            raise ValueError("The number of free_symbols in the expression "
                             "is greater than %d" % nb_of_free_symbols)
        if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
            # Ranges given explicitly after the expressions.
            ranges = Tuple(*[range_expr for range_expr in args[
                             i:i + nb_of_free_symbols]])
            plots = [expr + ranges for expr in exprs]
            return plots
        else:
            # Use default ranges.
            default_range = Tuple(-10, 10)
            ranges = []
            for symbol in free_symbols:
                ranges.append(Tuple(symbol) + default_range)
            for i in range(len(free_symbols) - nb_of_free_symbols):
                ranges.append(Tuple(Dummy()) + default_range)
            ranges = Tuple(*ranges)
            plots = [expr + ranges for expr in exprs]
            return plots

    elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
        # Multiple plots with different ranges.
        for arg in args:
            for i in range(expr_len):
                if not isinstance(arg[i], Expr):
                    raise ValueError("Expected an expression, given %s" %
                                     str(arg[i]))
            for i in range(nb_of_free_symbols):
                if not len(arg[i + expr_len]) == 3:
                    raise ValueError("The ranges should be a tuple of "
                                     "length 3, got %s" % str(arg[i + expr_len]))
        return args
| bsd-3-clause |
tristandeleu/switching-kalman-filter | test/test_skf_toyexample.py | 1 | 4797 | import numpy as np
from utils.kalman import SwitchingKalmanState, SwitchingKalmanFilter, KalmanFilter, KalmanState
from utils.kalman.models import NDCWPA, NDBrownian
import matplotlib.pyplot as plt
def angle_between(x, y):
    """Return the signed difference ``y - x`` wrapped to ``[-pi, pi]``."""
    candidates = (y - x, y - x + 2 * np.pi, y - x - 2 * np.pi)
    return min(candidates, key=np.abs)
class ManeuveringTarget(object):
    """Simple 2D target moving with a heading and speed.

    It can be commanded to turn, change speed, or stand still; each
    command is applied gradually over a given number of update steps.
    """

    def __init__(self, x0, y0, v0, heading):
        # Current kinematic state.
        self.x = x0
        self.y = y0
        self.vel = v0
        self.hdg = heading
        # Commanded values and per-step increments.
        self.cmd_vel = v0
        self.cmd_hdg = heading
        self.vel_step = 0
        self.hdg_step = 0
        self.vel_delta = 0
        self.hdg_delta = 0
        self.stop_step = 0

    def update(self):
        """Advance one simulation step; return the new (x, y) position."""
        if self.stop_step > 0:
            # Commanded to stand still: just consume one stop step.
            self.stop_step -= 1
        else:
            self.x += self.vel * np.cos(self.hdg)
            self.y += self.vel * np.sin(self.hdg)
            if self.hdg_step > 0:
                self.hdg_step -= 1
                self.hdg += self.hdg_delta
            if self.vel_step > 0:
                self.vel_step -= 1
                self.vel += self.vel_delta
        return np.array([self.x, self.y])

    def set_commanded_heading(self, hdg_degrees, steps):
        """Command a new heading, to be reached over *steps* updates."""
        self.cmd_hdg = hdg_degrees
        # NOTE(review): this argument order yields (hdg - cmd_hdg); confirm
        # the intended turn direction against angle_between's convention.
        self.hdg_delta = angle_between(self.cmd_hdg, self.hdg) / steps
        self.hdg_step = steps if abs(self.hdg_delta) > 0 else 0

    def set_commanded_speed(self, speed, steps):
        """Command a new speed, to be reached over *steps* updates."""
        self.cmd_vel = speed
        self.vel_delta = (self.cmd_vel - self.vel) / steps
        self.vel_step = steps if abs(self.vel_delta) > 0 else 0

    def set_commanded_stop(self, steps):
        """Freeze the target in place for *steps* updates."""
        self.stop_step = steps
# NOTE: this is a Python 2 script (uses `xrange` and list-returning `map`).

# --- Simulate a maneuvering target: straight run, then a commanded stop
# --- and turn, with additive Gaussian measurement noise Q.
n = 200
t = ManeuveringTarget(x0=0, y0=0, v0=3, heading=np.pi/4)
positions = np.zeros((n, 2))
Q = np.random.randn(n, 2) * 0.2
for i in xrange(100):
    positions[i, :] = t.update()
t.set_commanded_stop(50)
t.set_commanded_heading(np.pi / 2, 50)
for i in xrange(100):
    positions[100 + i,:] = t.update()
positions += Q

# --- Plain Kalman filter (constant-Wiener-process-acceleration model),
# --- forward filtering pass followed by a backward RTS smoothing pass.
state = KalmanState(mean=np.zeros(6), covariance=10.0 * np.eye(6))
model = NDCWPA(dt=1.0, q=2e-2, r=10.0, n_dim=2)
kalman = KalmanFilter(model=model)
filtered_states_kf = [state] * n
for i in xrange(n):
    observation = positions[i]
    state = kalman.filter(state, observation)
    filtered_states_kf[i] = state
smoothed_states_kf = [state] * n
for i in xrange(1, n):
    j = n - 1 - i
    state = kalman.smoother(filtered_states_kf[j], state)
    smoothed_states_kf[j] = state

# --- Switching Kalman filter with two regimes: moving (CWPA, 6-dim state)
# --- and stopped (Brownian, 2-dim state).
models = [
    NDCWPA(dt=1.0, q=2e-2, r=10.0, n_dim=2),
    NDBrownian(dt=1.0, q=2e-2, r=10.0, n_dim=2)
]
# Log of the regime transition matrix (sticky: 0.99 self-transition).
Z = np.log(np.asarray([
    [0.99, 0.01],
    [0.01, 0.99]
]))
masks = [
    np.array([
        np.diag([1, 0, 1, 0, 1, 0]),
        np.diag([0, 1, 0, 1, 0, 1])
    ]),
    np.array([
        np.diag([1, 0]),
        np.diag([0, 1])
    ])
]
# T embeds the 2-dim (position-only) state into the 6-dim state space.
T = np.kron(np.array([1, 0, 0]), np.eye(2))
embeds = [
    [np.eye(6), T],
    [T.T, np.eye(2)]
]
kalman = SwitchingKalmanFilter(models=models, log_transmat=Z, masks=masks, embeds=embeds)
state = SwitchingKalmanState(n_models=2)
state._states[0] = KalmanState(mean=np.zeros(6), covariance=10.0 * np.eye(6))
state._states[1] = KalmanState(mean=np.zeros(2), covariance=10.0 * np.eye(2))
# Uniform prior over the two regimes.
state.M = np.ones(2) / 2.0
filtered_states_skf = [state] * n
for i in xrange(n):
    observation = positions[i]
    state = kalman.filter(state, observation)
    filtered_states_skf[i] = state
smoothed_states_skf = [state] * n
for i in xrange(1, n):
    j = n - 1 - i
    state = kalman.smoother(state, filtered_states_skf[j])
    smoothed_states_skf[j] = state

# --- Choose whether to display smoothed or filtered estimates.
display_smoothed = True
if display_smoothed:
    output_states_skf = smoothed_states_skf
    output_states_kf = smoothed_states_kf
else:
    output_states_skf = filtered_states_skf
    output_states_kf = filtered_states_kf

# Collapse the per-regime states into a single estimate; `stops` is the
# (exponentiated log-) probability of the "stopped" regime at each step.
smoothed_collapsed = map(lambda state: state.collapse([np.eye(6), T.T]), output_states_skf)
smoothed_skf = np.asarray(map(lambda state: state.m, smoothed_collapsed))
smoothed_kf = np.asarray(map(lambda state: state.x(), output_states_kf))
stops = np.asarray(map(lambda state: np.exp(state.M[1]), output_states_skf))

# --- Plot: SKF track (stops in red), plain KF track, and stop probability.
subplot_shape = (2,2)
plt.subplot2grid(subplot_shape, (0,0))
plt.plot(positions[:,0], positions[:,1], 'b-')
plt.plot(smoothed_skf[:,0], smoothed_skf[:,1], 'g-')
plt.plot(smoothed_skf[stops>0.50,0], smoothed_skf[stops>0.50,1], 'ro')
plt.subplot2grid(subplot_shape, (1,0))
plt.plot(positions[:,0], positions[:,1], 'b-')
plt.plot(smoothed_kf[:,0], smoothed_kf[:,1], 'g-')
plt.subplot2grid(subplot_shape, (0,1), rowspan=2)
plt.plot(range(n), stops)
plt.plot(range(n), 0.5 * np.ones(n), 'r--')
plt.show() | mit |
buntyke/Python | Plotting/plot1.py | 1 | 1694 | #!/usr/bin/env
import matplotlib.pyplot as plt
from numpy import genfromtxt
import numpy as np

plt.rcParams.update({'font.size': '18'})

# Load (size, price) pairs from a local CSV file.
# NOTE(review): assumes 'data1' exists in the working directory with two
# comma-separated columns -- confirm before running.
data = genfromtxt('data1',delimiter=',')
xData = data[:,0]
yData = data[:,1]

# Fit a linear (l1) and a quadratic (l2) polynomial to the data.
zData = np.polyfit(xData, yData, 1)
l1 = np.poly1d(zData)
zData = np.polyfit(xData, yData, 2)
l2 = np.poly1d(zData)
xIntp = np.linspace(300,2300,20)

# Query point and the dashed guide lines dropped from each fit's value.
qPoint = 1250
xDraw1 = np.array([qPoint, qPoint, 0])
yDraw1 = np.array([0, l1(qPoint), l1(qPoint)])
xDraw2 = np.array([qPoint, qPoint, 0])
yDraw2 = np.array([0, l2(qPoint), l2(qPoint)])

plt.xkcd()

# Figure 0: raw data only.
plt.figure(0)
plt.plot(xData,yData,'rx',ms=10,mew=3,lw=2)
plt.axis([0, 2500, 0, 400])
plt.xlabel('Size in meter2',fontsize=20)
plt.ylabel('Price in 10,000 Yen',fontsize=20)
plt.title('Housing Price Prediction',fontsize=20)
plt.annotate('Input', xy=(qPoint, 100), xytext=(qPoint, 20), arrowprops = dict(facecolor='green'))
plt.show()

# Figure 1: data plus the linear fit and its guide lines.
plt.figure(1)
plt.plot(xData,yData,'rx',xIntp,l1(xIntp),'c-',xDraw1,yDraw1,'c--',ms=10,mew=3,lw=2)
plt.axis([0, 2500, 0, 400])
plt.xlabel('Size in meter2',fontsize=20)
plt.ylabel('Price in 10,000 Yen',fontsize=20)
plt.title('Housing Price Prediction',fontsize=20)
plt.annotate('Input', xy=(qPoint, 100), xytext=(qPoint, 20), arrowprops = dict(facecolor='green'))
plt.show()

# Figure 2: data plus both the linear and quadratic fits.
plt.figure(2)
plt.plot(xData,yData,'rx',xIntp,l1(xIntp),'c-',xIntp,l2(xIntp),'m-',xDraw1,yDraw1,'c--',xDraw2,yDraw2,'m--',ms=10,mew=3,lw=2)
plt.axis([0, 2500, 0, 400])
plt.xlabel('Size in meter2',fontsize=20)
plt.ylabel('Price in 10,000 Yen',fontsize=20)
plt.title('Housing Price Prediction',fontsize=20)
plt.annotate('Input', xy=(qPoint, 100), xytext=(qPoint, 20), arrowprops = dict(facecolor='green'))
plt.show()
| mit |
dhuppenkothen/stingray | stingray/io.py | 1 | 21680 | from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
import logging
import warnings
import os
import stingray.utils as utils
from .utils import order_list_of_arrays, is_string
from .utils import assign_value_if_none
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
def get_file_extension(fname):
    """Return the extension (including the leading dot) of *fname*."""
    _, extension = os.path.splitext(fname)
    return extension
def high_precision_keyword_read(hdr, keyword):
    """Read FITS header keywords, also if split in two.

    In the case where the keyword is split in two, like

        MJDREF = MJDREFI + MJDREFF

    in some missions, this function returns the summed value. Otherwise, the
    content of the single keyword

    Parameters
    ----------
    hdr : dict_like
        The FITS header structure, or a dictionary
    keyword : str
        The key to read in the header

    Returns
    -------
    value : long double
        The value of the key, or None if something went wrong
    """
    # Try the plain keyword first. Catch only the failures we expect
    # (missing key, non-numeric value) instead of a bare except.
    try:
        return np.longdouble(hdr[keyword])
    except (KeyError, TypeError, ValueError):
        pass

    # Fall back to the split integer/fractional pair. FITS keywords are at
    # most 8 characters, so trim before appending the 'I'/'F' suffix.
    try:
        if len(keyword) == 8:
            keyword = keyword[:7]
        value = np.longdouble(hdr[keyword + 'I'])
        value += np.longdouble(hdr[keyword + 'F'])
        return value
    except (KeyError, TypeError, ValueError):
        return None
def load_gtis(fits_file, gtistring=None):
    """Load GTI from HDU EVENTS of file fits_file.

    Parameters
    ----------
    fits_file : str
        Name of the FITS file to read.
    gtistring : str, optional
        Name of the extension containing the GTIs; defaults to 'GTI'.

    Returns
    -------
    gti_list : Nx2 longdouble array
        List of [start, stop] pairs read from the START/STOP columns.
    """
    from astropy.io import fits as pf
    import numpy as np

    gtistring = assign_value_if_none(gtistring, 'GTI')
    logging.info("Loading GTIS from file %s" % fits_file)
    lchdulist = pf.open(fits_file, checksum=True)
    # Verify the file, downgrading problems to warnings.
    lchdulist.verify('warn')

    gtitable = lchdulist[gtistring].data
    gti_list = np.array([[a, b]
                         for a, b in zip(gtitable.field('START'),
                                         gtitable.field('STOP'))],
                        dtype=np.longdouble)
    lchdulist.close()
    return gti_list
def _get_gti_from_extension(lchdulist, accepted_gtistrings=['GTI']):
hdunames = [h.name for h in lchdulist]
gtiextn = [ix for ix, x in enumerate(hdunames)
if x in accepted_gtistrings][0]
gtiext = lchdulist[gtiextn]
gtitable = gtiext.data
colnames = [col.name for col in gtitable.columns.columns]
# Default: NuSTAR: START, STOP. Otherwise, try RXTE: Start, Stop
if 'START' in colnames:
startstr, stopstr = 'START', 'STOP'
else:
startstr, stopstr = 'Start', 'Stop'
gtistart = np.array(gtitable.field(startstr), dtype=np.longdouble)
gtistop = np.array(gtitable.field(stopstr), dtype=np.longdouble)
gti_list = np.array([[a, b]
for a, b in zip(gtistart,
gtistop)],
dtype=np.longdouble)
return gti_list
def _get_additional_data(lctable, additional_columns):
additional_data = {}
if additional_columns is not None:
for a in additional_columns:
try:
additional_data[a] = np.array(lctable.field(a))
except: # pragma: no cover
if a == 'PI':
logging.warning('Column PI not found. Trying with PHA')
additional_data[a] = np.array(lctable.field('PHA'))
else:
raise Exception('Column' + a + 'not found')
return additional_data
def load_events_and_gtis(fits_file, additional_columns=None,
                         gtistring='GTI,STDGTI',
                         gti_file=None, hduname='EVENTS', column='TIME'):
    """Load event lists and GTIs from one or more files.

    Loads event list from HDU EVENTS of file fits_file, with Good Time
    intervals. Optionally, returns additional columns of data from the same
    HDU of the events.

    Parameters
    ----------
    fits_file : str
    return_limits: bool, optional
        Return the TSTART and TSTOP keyword values
    additional_columns: list of str, optional
        A list of keys corresponding to the additional columns to extract from
        the event HDU (ex.: ['PI', 'X'])

    Returns
    -------
    ev_list : array-like
    gtis: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
    additional_data: dict
        A dictionary, where each key is the one specified in additional_colums.
        The data are an array with the values of the specified column in the
        fits file.
    t_start : float
    t_stop : float
    """
    from astropy.io import fits as pf

    gtistring = assign_value_if_none(gtistring, 'GTI,STDGTI')
    lchdulist = pf.open(fits_file)

    # Load data table; fall back to the first extension if the named HDU
    # is missing.
    try:
        lctable = lchdulist[hduname].data
    except:  # pragma: no cover
        logging.warning('HDU %s not found. Trying first extension' % hduname)
        lctable = lchdulist[1].data

    # Read event list
    ev_list = np.array(lctable.field(column), dtype=np.longdouble)

    # Read TIMEZERO keyword and apply it to events
    try:
        timezero = np.longdouble(lchdulist[1].header['TIMEZERO'])
    except:  # pragma: no cover
        logging.warning("No TIMEZERO in file")
        timezero = np.longdouble(0.)

    ev_list += timezero

    # Read TSTART, TSTOP from header; fall back to the first/last events.
    try:
        t_start = np.longdouble(lchdulist[1].header['TSTART'])
        t_stop = np.longdouble(lchdulist[1].header['TSTOP'])
    except:  # pragma: no cover
        logging.warning("Tstart and Tstop error. using defaults")
        t_start = ev_list[0]
        t_stop = ev_list[-1]

    # Read and handle GTI extension
    accepted_gtistrings = gtistring.split(',')

    if gti_file is None:
        # Select first GTI with accepted name; if none, use the whole
        # [t_start, t_stop] span as a single GTI.
        try:
            gti_list = \
                _get_gti_from_extension(
                    lchdulist, accepted_gtistrings=accepted_gtistrings)
        except:  # pragma: no cover
            warnings.warn("No extensions found with a valid name. "
                          "Please check the `accepted_gtistrings` values.")
            gti_list = np.array([[t_start, t_stop]],
                                dtype=np.longdouble)
    else:
        gti_list = load_gtis(gti_file, gtistring)

    additional_data = _get_additional_data(lctable, additional_columns)

    lchdulist.close()

    # Sort event list, keeping the additional columns aligned.
    order = np.argsort(ev_list)
    ev_list = ev_list[order]

    additional_data = order_list_of_arrays(additional_data, order)

    returns = _empty()
    returns.ev_list = ev_list
    returns.gti_list = gti_list
    returns.additional_data = additional_data
    returns.t_start = t_start
    returns.t_stop = t_stop

    return returns
class _empty():
    """Bare container used as a simple namespace for multiple return values."""

    def __init__(self):
        pass
def mkdir_p(path):  # pragma: no cover
    """Recursively create *path*, ignoring the error if it already exists.

    Parameters
    ----------
    path : str
        Name of the directory/ies to create

    Notes
    -----
    Found at
    http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    """
    import errno
    import os

    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Only swallow the error when the directory is already there.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def read_header_key(fits_file, key, hdu=1):
    """Read the header key key from HDU hdu of the file fits_file.

    Parameters
    ----------
    fits_file: str
    key: str
        The keyword to be read

    Other Parameters
    ----------------
    hdu : int

    Returns
    -------
    value : object
        The keyword value, or '' when the keyword (or HDU) is missing.
    """
    from astropy.io import fits as pf

    hdulist = pf.open(fits_file)
    try:
        value = hdulist[hdu].header[key]
    except Exception:  # pragma: no cover - best effort: missing key or HDU
        # Narrowed from a bare `except:`; keep the best-effort '' fallback.
        value = ''
    hdulist.close()
    return value
def ref_mjd(fits_file, hdu=1):
    """Read MJDREFF+ MJDREFI or, if failed, MJDREF, from the FITS header.

    Parameters
    ----------
    fits_file : str

    Returns
    -------
    mjdref : numpy.longdouble
        the reference MJD

    Other Parameters
    ----------------
    hdu : int
    """
    # `collections.Iterable` was removed in Python 3.10; prefer the
    # `collections.abc` location with a fallback for very old Pythons.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable

    # If a list of files was passed, use the first one.
    if isinstance(fits_file, Iterable) and \
            not is_string(fits_file):  # pragma: no cover
        fits_file = fits_file[0]

    logging.info("opening %s" % fits_file)
    from astropy.io import fits as pf

    hdulist = pf.open(fits_file)
    ref_mjd_val = high_precision_keyword_read(hdulist[hdu].header, "MJDREF")
    hdulist.close()
    return ref_mjd_val
def common_name(str1, str2, default='common'):
    """Strip two strings of the letters not in common.

    Filenames must be of same length and only differ by a few letters.

    Parameters
    ----------
    str1 : str
    str2 : str

    Returns
    -------
    common_str : str
        A string containing the parts of the two names in common

    Other Parameters
    ----------------
    default : str
        The string to return if common_str is empty
    """
    if len(str1) != len(str2):
        return default

    # Keep only the characters that match position-by-position.
    common_str = ''.join(c1 for c1, c2 in zip(str1, str2) if c1 == c2)
    # Remove leading and trailing underscores and dashes
    common_str = common_str.rstrip('_').rstrip('-')
    common_str = common_str.lstrip('_').lstrip('-')
    if not common_str:
        common_str = default
    logging.debug('common_name: %s %s -> %s' % (str1, str2, common_str))
    return common_str
def contiguous_regions(condition):
    """Find contiguous True regions of the boolean array "condition".

    Return a 2D array where the first column is the start index of the region
    and the second column is the end index.

    Parameters
    ----------
    condition : boolean array

    Returns
    -------
    idx : [[i0_0, i0_1], [i1_0, i1_1], ...]
        A list of integer couples, with the start and end of each True blocks
        in the original array

    Notes
    -----
    From http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array
    """  # NOQA
    # Indices where "condition" flips value; shift right by one so each
    # index points at the first element AFTER the change.
    edges = np.flatnonzero(np.diff(condition)) + 1
    if condition[0]:
        # The array starts inside a True block: prepend index 0.
        edges = np.r_[0, edges]
    if condition[-1]:
        # The array ends inside a True block: append the array length.
        edges = np.r_[edges, condition.size]
    # Pair up (start, end) indices.
    return edges.reshape(-1, 2)
def check_gtis(gti):
    """Check if GTIs are well-behaved. No start>end, no overlaps.

    Raises
    ------
    AssertionError
        If GTIs are not well-behaved.
    """
    starts = gti[:, 0]
    stops = gti[:, 1]

    logging.debug('-- GTI: ' + repr(gti))
    # Every interval must have non-negative length.
    assert np.all(stops >= starts), 'This GTI is incorrect'
    # Intervals must be sorted and non-overlapping.
    assert np.all(starts[1:] >= stops[:-1]), 'This GTI has overlaps'
    logging.debug('-- Correct')

    return
def create_gti_mask(time, gtis, safe_interval=0, min_length=0,
                    return_new_gtis=False, dt=None):
    """Create GTI mask.

    Assumes that no overlaps are present between GTIs

    Parameters
    ----------
    time : float array
    gtis : [[g0_0, g0_1], [g1_0, g1_1], ...], float array-like

    Returns
    -------
    mask : boolean array
    new_gtis : Nx2 array

    Other parameters
    ----------------
    safe_interval : float or [float, float]
        A safe interval to exclude at both ends (if single float) or the start
        and the end (if pair of values) of GTIs.
    min_length : float
    return_new_gtis : bool
    dt : float
    """
    # `collections.Iterable` was removed in Python 3.10; prefer
    # `collections.abc` with a fallback for very old Pythons.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable

    check_gtis(gtis)

    # Default bin half-width: half of the (assumed constant) sampling step.
    dt = assign_value_if_none(dt,
                              np.zeros_like(time) + (time[1] - time[0]) / 2)

    mask = np.zeros(len(time), dtype=bool)

    if not isinstance(safe_interval, Iterable):
        safe_interval = [safe_interval, safe_interval]

    newgtis = np.zeros_like(gtis)
    # Whose GTIs, including safe intervals, are longer than min_length
    # (np.bool was removed in NumPy 1.24; use the builtin bool).
    newgtimask = np.zeros(len(newgtis), dtype=bool)

    for ig, gti in enumerate(gtis):
        limmin, limmax = gti
        limmin += safe_interval[0]
        limmax -= safe_interval[1]
        if limmax - limmin >= min_length:
            newgtis[ig][:] = [limmin, limmax]
            # A bin is good only if it lies entirely within the GTI.
            cond1 = time - dt >= limmin
            cond2 = time + dt <= limmax
            good = np.logical_and(cond1, cond2)
            mask[good] = True
            newgtimask[ig] = True

    res = mask
    if return_new_gtis:
        res = [res, newgtis[newgtimask]]
    return res
def create_gti_from_condition(time, condition,
                              safe_interval=0, dt=None):
    """Create a GTI list from a time array and a boolean mask ("condition").

    Parameters
    ----------
    time : array-like
        Array containing times
    condition : array-like
        An array of bools, of the same length of time.
        A possible condition can be, e.g., the result of lc > 0.

    Returns
    -------
    gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        The newly created GTIs

    Other parameters
    ----------------
    safe_interval : float or [float, float]
        A safe interval to exclude at both ends (if single float) or the start
        and the end (if pair of values) of GTIs.
    dt : float
        The width (in sec) of each bin of the time array. Can be irregular.
    """
    # `collections.Iterable` was removed in Python 3.10; prefer
    # `collections.abc` with a fallback for very old Pythons.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable

    assert len(time) == len(condition), \
        'The length of the condition and time arrays must be the same.'

    # Start/stop index pairs of each contiguous True run.
    idxs = contiguous_regions(condition)

    if not isinstance(safe_interval, Iterable):
        safe_interval = [safe_interval, safe_interval]

    # Default bin half-width: half of the (assumed constant) sampling step.
    dt = assign_value_if_none(dt,
                              np.zeros_like(time) + (time[1] - time[0]) / 2)

    gtis = []
    for idx in idxs:
        logging.debug(idx)
        startidx = idx[0]
        stopidx = idx[1] - 1

        t0 = time[startidx] - dt[startidx] + safe_interval[0]
        t1 = time[stopidx] + dt[stopidx] - safe_interval[1]
        if t1 - t0 < 0:
            # The safe intervals ate the whole interval: drop it.
            continue
        gtis.append([t0, t1])
    return np.array(gtis)
def cross_two_gtis(gti0, gti1):
    """Extract the common intervals from two GTI lists *EXACTLY*.

    Parameters
    ----------
    gti0 : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
    gti1 : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]

    Returns
    -------
    gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        The newly created GTIs

    See Also
    --------
    cross_gtis : From multiple GTI lists, extract common intervals *EXACTLY*
    """
    gti0 = np.array(gti0, dtype=np.longdouble)
    gti1 = np.array(gti1, dtype=np.longdouble)
    # Check GTIs
    check_gtis(gti0)
    check_gtis(gti1)

    gti0_start = gti0[:, 0]
    gti0_end = gti0[:, 1]
    gti1_start = gti1[:, 0]
    gti1_end = gti1[:, 1]

    # Create a list that references to the two start and end series
    gti_start = [gti0_start, gti1_start]
    gti_end = [gti0_end, gti1_end]

    # Concatenate the series, while keeping track of the correct origin of
    # each start and end time
    gti0_tag = np.array([0 for g in gti0_start], dtype=bool)
    gti1_tag = np.array([1 for g in gti1_start], dtype=bool)
    conc_start = np.concatenate((gti0_start, gti1_start))
    conc_end = np.concatenate((gti0_end, gti1_end))
    conc_tag = np.concatenate((gti0_tag, gti1_tag))

    # Put in time order
    order = np.argsort(conc_end)
    conc_start = conc_start[order]
    conc_end = conc_end[order]
    conc_tag = conc_tag[order]

    # Sentinel strictly before the first start time.
    last_end = conc_start[0] - 1
    final_gti = []
    # Sweep over every interval END (both series merged, in time order);
    # each end may close a common interval.
    for ie, e in enumerate(conc_end):
        # Is this ending in series 0 or 1?
        this_series = conc_tag[ie]
        other_series = not this_series

        # Check that this closes intervals in both series.
        # 1. Check that there is an opening in both series 0 and 1 lower than e
        try:
            st_pos = \
                np.argmax(gti_start[this_series][gti_start[this_series] < e])
            so_pos = \
                np.argmax(gti_start[other_series][gti_start[other_series] < e])
            st = gti_start[this_series][st_pos]
            so = gti_start[other_series][so_pos]

            # The candidate common interval starts at the later opening.
            s = max([st, so])
        except:  # pragma: no cover
            continue

        # If this start is inside the last interval (It can happen for equal
        # GTI start times between the two series), then skip!
        if s <= last_end:
            continue
        # 2. Check that there is no closing before e in the "other series",
        # from intervals starting either after s, or starting and ending
        # between the last closed interval and this one
        cond1 = (gti_end[other_series] > s) * (gti_end[other_series] < e)
        cond2 = gti_end[other_series][so_pos] < s
        condition = np.any(np.logical_or(cond1, cond2))
        # Well, if none of the conditions at point 2 apply, then you can
        # create the new gti!
        if not condition:
            final_gti.append([s, e])
            last_end = e

    return np.array(final_gti, dtype=np.longdouble)
def cross_gtis(gti_list):
    """From multiple GTI lists, extract the common intervals *EXACTLY*.

    Parameters
    ----------
    gti_list : array-like
        List of GTI arrays, each one in the usual format [[gti0_0, gti0_1],
        [gti1_0, gti1_1], ...]

    Returns
    -------
    gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        The newly created GTIs

    See Also
    --------
    cross_two_gtis : Extract the common intervals from two GTI lists *EXACTLY*
    """
    # A single list is trivially its own intersection.
    if len(gti_list) == 1:
        return gti_list[0]

    # Fold the pairwise intersection over the remaining lists.
    result = gti_list[0]
    for gti in gti_list[1:]:
        result = cross_two_gtis(result, gti)

    return result
def get_btis(gtis, start_time=None, stop_time=None):
    """From GTIs, obtain bad time intervals.

    GTIs have to be well-behaved, in the sense that they have to pass
    `check_gtis`.

    Parameters
    ----------
    gtis : Nx2 array
        Good time intervals as [start, stop] pairs.
    start_time, stop_time : float, optional
        Observation boundaries; default to the first GTI start and the
        last GTI stop.

    Returns
    -------
    btis : Mx2 longdouble array
        The complementary [start, stop] bad time intervals.
    """
    # Check GTIs
    if len(gtis) == 0:
        assert start_time is not None and stop_time is not None, \
            'Empty GTI and no valid start_time and stop_time. BAD!'

        return np.array([[start_time, stop_time]], dtype=np.longdouble)
    check_gtis(gtis)

    start_time = assign_value_if_none(start_time, gtis[0][0])
    stop_time = assign_value_if_none(stop_time, gtis[-1][1])

    btis = []
    # Gap before the first GTI, if any. (Fixed: the original appended the
    # one-element difference [gtis[0][0] - start_time] instead of a pair.)
    if gtis[0][0] - start_time > 0:
        btis.append([start_time, gtis[0][0]])

    # Interior gaps: pair each GTI stop with the next GTI start.
    flat_gtis = gtis.flatten()
    btis.extend(zip(flat_gtis[1:-2:2], flat_gtis[2:-1:2]))

    # Gap after the last GTI, if any. (Fixed: the original appended
    # [gtis[0][0] - stop_time] instead of [last stop, stop_time].)
    if stop_time - gtis[-1][1] > 0:
        btis.append([gtis[-1][1], stop_time])

    return np.array(btis, dtype=np.longdouble)
def gti_len(gti):
    """Return the total good time, i.e. the summed length of all GTIs."""
    durations = [stop - start for start, stop in gti]
    return np.sum(durations)
def _save_pickle_object(object, filename):
pickle.dump(object, open(filename, "wb" ))
def _retrieve_pickle_object(filename):
return pickle.load(open(filename, "rb" ) )
def _save_hdf5_object(object, filename):
    # Placeholder: HDF5 serialization not implemented yet (see `write`).
    pass
def _retrieve_hdf5_object(object, filename):
    # Placeholder: HDF5 deserialization not implemented yet (see `read`).
    # NOTE(review): signature takes `object` unlike `_retrieve_pickle_object`
    # — looks like a copy-paste slip; confirm before implementing.
    pass
def _save_ascii_object(object, filename):
    # Placeholder: ASCII serialization not implemented yet (see `write`).
    pass
def _retrieve_ascii_object(object, filename):
    # Placeholder: ASCII deserialization not implemented yet (see `read`).
    # NOTE(review): signature takes `object` unlike `_retrieve_pickle_object`
    # — looks like a copy-paste slip; confirm before implementing.
    pass
def write(input, filename, format='pickle'):
    """
    Serialize a class instance to a file.

    Parameters
    ----------
    input : object
        The class instance to be saved.
    filename : str
        Name of the file to be created.
    format : str
        One of 'pickle', 'hdf5' or 'ascii'.

    Raises
    ------
    ValueError
        If `format` is not a recognized serialization format.
    """
    if format == 'pickle':
        _save_pickle_object(input, filename)
    elif format == 'hdf5':
        _save_hdf5_object(input, filename)
    elif format == 'ascii':
        _save_ascii_object(input, filename)
    else:
        # Previously an unknown format was silently ignored (nothing was
        # written and no error was raised); fail loudly instead.
        raise ValueError("Unknown serialization format: '{0}'".format(format))
def read(filename, format='pickle'):
    """
    Return a deserialized class instance.

    Parameters
    ----------
    filename : str
        Name of the file to be retrieved.
    format : str
        One of 'pickle', 'hdf5' or 'ascii'.

    Returns
    -------
    object
        The deserialized instance.

    Raises
    ------
    ValueError
        If `format` is not a recognized serialization format.
    """
    if format == 'pickle':
        return _retrieve_pickle_object(filename)
    elif format == 'hdf5':
        return _retrieve_hdf5_object(filename)
    elif format == 'ascii':
        return _retrieve_ascii_object(filename)
    else:
        # Previously an unknown format silently returned None; fail loudly.
        raise ValueError("Unknown serialization format: '{0}'".format(format))
def savefig(filename, **kwargs):
    """
    Save a figure plotted by Matplotlib.
    Note : This function is supposed to be used after the ``plot``
    function. Otherwise it will save a blank image with no plot.
    Parameters
    ----------
    filename : str
        The name of the image file. Extension must be specified in the
        file name. For example filename with `.png` extension will give a
        rasterized image while `.pdf` extension will give a vectorized
        output.
    kwargs : keyword arguments
        Keyword arguments to be passed to ``savefig`` function of
        ``matplotlib.pyplot``. For example use `bbox_inches='tight'` to
        remove the undesirable whitespace around the image.
    """
    # Import lazily so matplotlib stays an optional dependency.
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for savefig()")
    # Figure number 1 is the one ``plot`` creates; if it does not exist we
    # still call savefig (producing a blank image) but warn via utils.simon.
    if not plt.fignum_exists(1):
        utils.simon("use ``plot`` function to plot the image first and "
                    "then use ``savefig`` to save the figure.")
    plt.savefig(filename, **kwargs)
| mit |
alexblaessle/PyFRAP | docs/source/conf.py | 2 | 11634 | # -*- coding: utf-8 -*-
#
# PyFRAP documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 25 21:59:01 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# -- Managing paths ------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../../pyfrp/'))
sys.path.insert(0, os.path.abspath('../../pyfrp/modules/'))
sys.path.insert(0, os.path.abspath('../../pyfrp/gui/'))
sys.path.insert(0, os.path.abspath('../../pyfrp/subclasses/'))
# -- Mocking packages ------------------------------------------------
#Mocking numpy and matplotlib so they don't crash readthedocs ### NOTE: might need to add numpy here again
#from mock import Mock as MagicMock
#class Mock(MagicMock):
#@classmethod
#def __getattr__(cls, name):
#return Mock()
#If on RTD, moch some stuff
# On ReadTheDocs, mock every heavy/binary dependency so autodoc can import
# the package without having them installed.
if os.environ.get('READTHEDOCS', None) == 'True':
    import mock
    MOCK_MODULES = [
        # Colorama
        'colorama',
        # numpy
        'numpy', 'numpy.core', 'numpy.core.multiarray',
        # Fipy
        'fipy',
        # cv2
        'cv2',
        # Scipy
        'scipy', 'scipy.optimize', 'scipy.interpolate', 'scipy.ndimage',
        'scipy.ndimage.interpolation',
        # Matplotlib (and all submodules)
        'matplotlib', 'matplotlib.pyplot', 'matplotlib.axes', 'mpl_toolkits',
        'mpl_toolkits.mplot3d', 'matplotlib.collections',
        'matplotlib.patches', 'matplotlib.tri', 'matplotlib.backends',
        'matplotlib.backends.backend_qt4agg',
        # Bug fix: this entry was misspelled 'matplotlib.bakends.…', so the
        # intended mock was never registered under the real module path.
        'matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg',
        'matplotlib.backends.backend_qt4agg.FigureCanvas',
        'matplotlib.figure', 'mpl_toolkits.mplot3d.art3d',
        # Skimage
        'skimage', 'skimage.io', 'skimage.morphology', 'scipy.signal',
        'skimage.measure', 'skimage.filters',
        # PyQT
        'PyQt4', 'PyQt4.QtGui', 'PyQt4.QtCore',
        # MeshIO
        'meshio',
        # VTK
        'vtk',
        # Solid
        'solid', 'solid.utils',
        # STL
        'stl']
    for mod_name in MOCK_MODULES:
        sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyFRAP'
copyright = u'2016, Alexander Blaessle'
author = u'Alexander Blaessle'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['../../pyfrp/__init__.py','../../pyfrp/subclasses/__init__.py','../../pyfrp/modules/__init__.py','../../pyfrp/gui/__init__.py','../../pyfrp/PyFRAP.py','../../pyfrp/gui/**','pyfrp.gui.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyFRAPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyFRAP.tex', u'PyFRAP Documentation',
u'Alexander Blaessle', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyfrap', u'PyFRAP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyFRAP', u'PyFRAP Documentation',
author, 'PyFRAP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
wanghaven/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 | """
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
    """
    Get a colormap instance, defaulting to rc values if *name* is None.

    Parameters
    ----------
    name : str, optional
        Name of a registered colormap (a key of ``datad``); defaults to
        ``rcParams['image.cmap']``.
    lut : int, optional
        Number of entries in the colormap lookup table; defaults to
        ``rcParams['image.lut']``.

    Returns
    -------
    colors.LinearSegmentedColormap

    Raises
    ------
    ValueError
        If *name* is not a known colormap name.
    """
    if name is None: name = mpl.rcParams['image.cmap']
    if lut is None: lut = mpl.rcParams['image.lut']
    # Raise a real error instead of `assert`, which is stripped under -O
    # and produced an unhelpful AssertionError message.
    if name not in datad:
        raise ValueError("Unknown colormap name: %r" % (name,))
    return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
    """
    This is a mixin class to support scalar -> RGBA mapping. Handles
    normalization and colormapping
    """
    def __init__(self, norm=None, cmap=None):
        """
        *norm* is an instance of :class:`colors.Normalize` or one of
        its subclasses, used to map luminance to 0-1. *cmap* is a
        :mod:`cm` colormap instance, for example :data:`cm.jet`
        """
        self.callbacksSM = cbook.CallbackRegistry((
            'changed',))
        if cmap is None: cmap = get_cmap()
        if norm is None: norm = colors.Normalize()
        # _A holds the data array set via set_array(); None until then.
        self._A = None
        self.norm = norm
        self.cmap = cmap
        self.colorbar = None
        # Maps checker name -> "has changed since last check" flag.
        self.update_dict = {'array':False}
    def set_colorbar(self, im, ax):
        'set the colorbar image and axes associated with mappable'
        self.colorbar = im, ax
    def to_rgba(self, x, alpha=1.0, bytes=False):
        '''Return a normalized rgba array corresponding to *x*. If *x*
        is already an rgb array, insert *alpha*; if it is already
        rgba, return it unchanged. If *bytes* is True, return rgba as
        4 uint8s instead of 4 floats.
        '''
        # Fast path: *x* already looks like an (m, n, 3|4) image array.
        # Non-arrays raise AttributeError on .ndim and fall through to the
        # scalar-mapping path below.
        try:
            if x.ndim == 3:
                if x.shape[2] == 3:
                    if x.dtype == np.uint8:
                        alpha = np.array(alpha*255, np.uint8)
                    m, n = x.shape[:2]
                    xx = np.empty(shape=(m,n,4), dtype = x.dtype)
                    xx[:,:,:3] = x
                    xx[:,:,3] = alpha
                elif x.shape[2] == 4:
                    xx = x
                else:
                    raise ValueError("third dimension must be 3 or 4")
                if bytes and xx.dtype != np.uint8:
                    xx = (xx * 255).astype(np.uint8)
                return xx
        except AttributeError:
            pass
        # Scalar data: normalize to [0, 1], then map through the colormap.
        x = ma.asarray(x)
        x = self.norm(x)
        x = self.cmap(x, alpha=alpha, bytes=bytes)
        return x
    def set_array(self, A):
        'Set the image array from numpy array *A*'
        self._A = A
        self.update_dict['array'] = True
    def get_array(self):
        'Return the array'
        return self._A
    def get_cmap(self):
        'return the colormap'
        return self.cmap
    def get_clim(self):
        'return the min, max of the color limits for image scaling'
        return self.norm.vmin, self.norm.vmax
    def set_clim(self, vmin=None, vmax=None):
        """
        set the norm limits for image scaling; if *vmin* is a length2
        sequence, interpret it as ``(vmin, vmax)`` which is used to
        support setp
        ACCEPTS: a length 2 sequence of floats
        """
        if (vmin is not None and vmax is None and
            cbook.iterable(vmin) and len(vmin)==2):
            vmin, vmax = vmin
        if vmin is not None: self.norm.vmin = vmin
        if vmax is not None: self.norm.vmax = vmax
        self.changed()
    def set_cmap(self, cmap):
        """
        set the colormap for luminance data
        ACCEPTS: a colormap
        """
        if cmap is None: cmap = get_cmap()
        self.cmap = cmap
        self.changed()
    def set_norm(self, norm):
        'set the normalization instance'
        if norm is None: norm = colors.Normalize()
        self.norm = norm
        self.changed()
    def autoscale(self):
        """
        Autoscale the scalar limits on the norm instance using the
        current array
        """
        if self._A is None:
            raise TypeError('You must first set_array for mappable')
        self.norm.autoscale(self._A)
        self.changed()
    def autoscale_None(self):
        """
        Autoscale the scalar limits on the norm instance using the
        current array, changing only limits that are None
        """
        if self._A is None:
            raise TypeError('You must first set_array for mappable')
        self.norm.autoscale_None(self._A)
        self.changed()
    def add_checker(self, checker):
        """
        Add an entry to a dictionary of boolean flags
        that are set to True when the mappable is changed.
        """
        self.update_dict[checker] = False
    def check_update(self, checker):
        """
        If mappable has changed since the last check,
        return True; else return False
        """
        # Reading the flag clears it, so each change is reported once.
        if self.update_dict[checker]:
            self.update_dict[checker] = False
            return True
        return False
    def changed(self):
        """
        Call this whenever the mappable is changed to notify all the
        callbackSM listeners to the 'changed' signal
        """
        self.callbacksSM.process('changed', self)
        for key in self.update_dict:
            self.update_dict[key] = True
| agpl-3.0 |
nhuntwalker/astroML | book_figures/chapter8/fig_cross_val_D.py | 3 | 3153 | """
Cross Validation Examples: Part 4
---------------------------------
Figure 8.15
The learning curves for the data given by eq. 8.75, with d = 2 and d = 3. Both
models have high variance for a few data points, visible in the spread between
training and validation error. As the number of points increases, it is clear
that d = 2 is a high-bias model which cannot be improved simply by adding
training points.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib.patches import FancyArrow
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define our functional form
def func(x, dy=0.1):
    """Noisy target: samples sin(x) * x plus Gaussian noise of std *dy*."""
    mean = np.sin(x) * x
    return np.random.normal(mean, dy)
#------------------------------------------------------------
# select the (noisy) data
np.random.seed(0)
x = np.linspace(0, 3, 22)[1:-1]
dy = 0.1
y = func(x, dy)
#------------------------------------------------------------
# Select the cross-validation points
np.random.seed(1)
x_cv = 3 * np.random.random(20)
y_cv = func(x_cv)
x_fit = np.linspace(0, 3, 1000)
#------------------------------------------------------------
# Fourth figure: plot errors as a function of training set size
np.random.seed(0)
x = 3 * np.random.random(100)
y = func(x)
np.random.seed(1)
x_cv = 3 * np.random.random(100)
y_cv = func(x_cv)
Nrange = np.arange(10, 101, 2)
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.15, top=0.95)
for subplot, d in zip([211, 212], [2, 3]):
ax = fig.add_subplot(subplot)
training_err = np.zeros(Nrange.shape)
crossval_err = np.zeros(Nrange.shape)
for j, N in enumerate(Nrange):
p = np.polyfit(x[:N], y[:N], d)
training_err[j] = np.sqrt(np.sum((np.polyval(p, x[:N])
- y[:N]) ** 2) / len(y))
crossval_err[j] = np.sqrt(np.sum((np.polyval(p, x_cv)
- y_cv) ** 2) / len(y_cv))
ax.plot(Nrange, crossval_err, '--k', label='cross-validation')
ax.plot(Nrange, training_err, '-k', label='training')
ax.plot(Nrange, 0.1 * np.ones(Nrange.shape), ':k')
ax.legend(loc=1)
ax.text(0.03, 0.94, "d = %i" % d, transform=ax.transAxes,
ha='left', va='top', bbox=dict(ec='k', fc='w', pad=10))
ax.set_ylim(0, 0.4)
ax.set_xlabel('Number of training points')
ax.set_ylabel('rms error')
plt.show()
| bsd-2-clause |
stylianos-kampakis/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """Duplicate every write to two file-like objects (like Unix ``tee``)."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # Forward the payload to both sinks.
        for sink in (self.file1, self.file2):
            sink.write(data)

    def flush(self):
        for sink in (self.file1, self.file2):
            sink.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index
    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)
    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # Scan forward keeping a nesting counter; stop when the block
        # opened at start_pos closes again (depth back to 0).
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
            if depth == 0:
                break
        sel = str_in[start_pos + 1:pos]
        return sel
    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        # Walk "key:value" pairs; values are either [..] lists or nested
        # {..} dicts (parsed recursively).
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out
    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')
    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)
    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx
    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """
    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        # Caches: resolved links per fully-qualified name, and fetched HTML
        # pages per URL, so repeated lookups don't re-download anything.
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)
        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False
        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)
        self._searchindex = dict(filenames=filenames, objects=objects)
    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]
        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)
            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html
            # test if cobj appears in page: only emit an anchor URL if the
            # qualified name actually occurs in the target page's HTML.
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False
        return link
    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found
        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobi['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).
        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link
        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')
            # for some reason, the relative link goes one directory too high up
            link = link[3:]
        return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    # A shebang line is not part of the docstring; drop it and remember
    # the offset so the reported end row stays correct.
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    # The first STRING token (after skipping layout tokens) is the
    # module-level docstring.
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
            break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.

    Connected to the Sphinx 'builder-inited' event; ``app`` is the Sphinx
    application object.
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))
    # plot_gallery may be a string ("True"/"False" from the command line) or
    # already a bool; eval handles the former, bool() the latter.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)

    # we create an index.rst with all examples
    fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
    # Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\

.. raw:: html

    <style type="text/css">
    div#sidebarbutton {
        /* hide the sidebar collapser, while ensuring vertical arrangement */
        display: none;
    }
    </style>

.. _examples-index:

Examples
========

""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, directory)):
            generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    # Extract the line count of a file.
    # Returns (line just past the docstring, total line count), both
    # 1-based and adjusted for a possible shebang line.
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    # Walk all tokens: the first STRING token is the module docstring;
    # `erow` ends up holding the last row of the file.
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
    """Sort a list of ``.py`` file names by code length.

    The sort key is the number of code lines (total minus docstring), with
    the file name as a tiebreaker. Non-Python files are dropped.
    """
    new_list = [x for x in file_list if x.endswith('.py')]
    # Bail out before building arrays: lexsort on empty input is pointless
    # (the original only checked for emptiness after sorting).
    if not new_list:
        return []
    unsorted = np.zeros(shape=(len(new_list), 2), dtype=object)
    for count, exmpl in enumerate(new_list):
        docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
        unsorted[count][1] = total_lines - docstr_lines
        unsorted[count][0] = exmpl
    # Builtin str/float: the np.str / np.float / np.object aliases were
    # removed in NumPy >= 1.24 and raise AttributeError there.
    index = np.lexsort((unsorted[:, 0].astype(str),
                        unsorted[:, 1].astype(float)))
    return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.

    Writes one thumbnail + hidden toctree entry per example into ``fhindex``
    and appends backreference include files under modules/generated/.
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # examples are listed shortest-first (see line_count_sort)
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""

.. toctree::
   :hidden:

   %s/%s

""" % (directory, fname[:-3]))
            # For every sklearn object used by the example, append a
            # thumbnail to that object's "Examples using ..." include file.
            for backref in backrefs:
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                # first time we see a backref: truncate and write the heading
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                        print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
                    seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html

    <div class="clearer"></div>

""")  # clear at the end of the section
# modules for which we embed links into example code
# (see embed_code_links below; each gets a SphinxDocLinkResolver)
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height.

    Parameters
    ----------
    in_fname : str
        Path of the source image.
    out_fname : str
        Path the thumbnail is written to (PNG).
    width, height : int
        Dimensions of the output canvas, in pixels.
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)

    # pick the scale that keeps the resized image inside the target box
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # under its current name, so prefer it and fall back for ancient PIL.
    resample = getattr(Image, 'LANCZOS', getattr(Image, 'ANTIALIAS', None))

    # resize the image
    img.thumbnail((width_sc, height_sc), resample)

    # insert centered
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)

    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """ Get the shortest possible module name from which ``obj_name`` can
    still be imported (e.g. 'sklearn.svm' instead of 'sklearn.svm.classes').
    """
    parts = module_name.split('.')
    short_name = module_name
    for i in range(len(parts) - 1, 0, -1):
        short_name = '.'.join(parts[:i])
        try:
            # Equivalent of ``from short_name import obj_name`` without
            # building and exec()-ing source code.
            mod = __import__(short_name, fromlist=[obj_name])
            getattr(mod, obj_name)
        except (ImportError, AttributeError):
            # get the last working module name
            short_name = '.'.join(parts[:(i + 1)])
            break
    return short_name
class NameFinder(ast.NodeVisitor):
    """AST visitor collecting the longest dotted form of accessed names.

    Only names whose root was bound by an import statement survive into
    ``get_mapping``, which pairs each local spelling with its fully
    resolved import path.
    """

    def __init__(self):
        super(NameFinder, self).__init__()
        self.imported_names = {}   # local alias -> full import path
        self.accessed_names = set()

    def visit_Import(self, node, prefix=''):
        for alias in node.names:
            # 'import numpy as np' binds 'np'; plain imports bind the name
            bound_as = alias.asname if alias.asname else alias.name
            self.imported_names[bound_as] = prefix + alias.name

    def visit_ImportFrom(self, node):
        # 'from a.b import c' is an import of 'c' with prefix 'a.b.'
        self.visit_Import(node, node.module + '.')

    def visit_Name(self, node):
        self.accessed_names.add(node.id)

    def visit_Attribute(self, node):
        # Collect the attribute chain from the outside in.
        chain = []
        inner = node
        while isinstance(inner, ast.Attribute):
            chain.append(inner.attr)
            inner = inner.value
        if isinstance(inner, ast.Name):
            # This is a.b, not e.g. a().b
            chain.append(inner.id)
            self.accessed_names.add('.'.join(chain[::-1]))
        else:
            # need to get a in a().b
            self.visit(inner)

    def get_mapping(self):
        """Yield (name as written, fully qualified name) pairs."""
        for accessed in self.accessed_names:
            root, _, _ = accessed.partition('.')
            if root in self.imported_names:
                # Join import path to relative path
                suffix = accessed[len(root):]
                yield accessed, self.imported_names[root] + suffix
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names

    Maps each dotted name written in ``code`` to a dict with the object
    name, its defining module, and the shortest importable module path.

    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))

    example_code_obj = {}
    for name, full_name in finder.get_mapping():
        # name is as written in file (e.g. np.asarray)
        # full_name includes resolved import path (e.g. numpy.asarray)
        module, attribute = full_name.rsplit('.', 1)
        # get shortened module name
        module_short = get_short_module_name(module, attribute)
        cobj = {'name': attribute, 'module': module,
                'module_short': module_short}
        example_code_obj[name] = cobj
    return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Copies the example source, optionally executes it to produce figures
    and captured stdout, writes thumbnails, renders the rst page from a
    template, and pickles the names used by the example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    # e.g. 'plot_foo_%03d.png' -- one file per generated figure
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                # Tee mirrors the example's output to both the console and
                # the buffer we capture for the rst page.
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't with to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    # only pass non-default face/edge colors to savefig
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                # a failing example must not abort the whole doc build
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            # up to date: just collect the previously generated figures
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                              '[0-9][0-9][0-9]'))]
            figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally the should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    # the template is rendered from the local variables defined above
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code.

    Connected to the Sphinx 'build-finished' event. Rewrites the generated
    example HTML in place, wrapping known identifiers in <a> links.
    """
    if exception is not None:
        return
    print('Embedding documentation hyperlinks in examples..')

    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return

    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)

    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            # building offline is supported: skip this module's links
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)

    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))

    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'

    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')

            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]

                    if this_module not in doc_resolvers:
                        continue

                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue

                    if link is not None:
                        parts = name.split('.')
                        # reproduce the pygments markup of 'a.b.c' so the
                        # replacement matches the highlighted HTML
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file

                # ensure greediness
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))

                def substitute_link(match):
                    return str_repl[match.group()]

                if len(str_repl) > 0:
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    """Sphinx extension entry point: register events and config values."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')

    # embed links after build is finished
    app.connect('build-finished', embed_code_links)

    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made.  If the desired image name already
    # exists, it appends a digit to prevent overwrites.  The problem is,
    # the directory is never cleared.  This means that each time you build
    # the docs, the number of images in the directory grows.
    #
    # This question has been asked on the sphinx development list, but there
    # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # The following is a hack that prevents this behavior by clearing the
    # image build directory each time the docs are built.  If sphinx
    # changes their layout between versions, this will not work (though
    # it should probably not cause a crash).  Tested successfully
    # on Sphinx 1.0.7
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        filelist = os.listdir(build_image_dir)
        for filename in filelist:
            if filename.endswith('png'):
                os.remove(os.path.join(build_image_dir, filename))
def setup_module():
    """Intentional no-op.

    HACK: its presence stops nosetests from treating the setup() function
    above as a module-level test fixture.
    """
| bsd-3-clause |
jayflo/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
# Echo the usage instructions from the module docstring when the GUI starts.
print(__doc__)

# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
# Bounds of the 2D canvas on which training points can be placed.
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """Observable data holder for the GUI (subject in the observer pattern).

    Stores the clicked training points and the fitted decision surface,
    and notifies every registered observer on each change event.
    """

    def __init__(self):
        self.observers = []    # registered observer objects
        self.surface = None    # (X1, X2, Z) decision-surface grid
        self.data = []         # list of (x, y, label) training points
        self.cls = None
        self.surface_type = 0  # 0: contour lines, 1: filled surface

    def changed(self, event):
        """Notify every registered observer of *event*."""
        for listener in self.observers:
            listener.update(event, self)

    def add_observer(self, observer):
        """Register an observer to receive change notifications."""
        self.observers.append(observer)

    def set_surface(self, surface):
        self.surface = surface

    def dump_svmlight_file(self, file):
        """Write the collected points to *file* in svmlight format."""
        points = np.array(self.data)
        features = points[:, 0:2]
        labels = points[:, 2]
        dump_svmlight_file(features, labels, file)
class Controller(object):
    """GUI controller: reads widget state, fits the SVM, updates the model."""

    def __init__(self, model):
        self.model = model
        # Tk variables bound to the kernel / surface-type radio buttons;
        # the complexity/gamma/degree/coef0 StringVars are attached later
        # by ControllBar.
        self.kernel = Tk.IntVar()
        self.surface_type = Tk.IntVar()
        # Whether or not a model has been fitted
        self.fitted = False

    def fit(self):
        """Fit an SVM on the current points and push the surface to the model."""
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]

        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        if len(np.unique(y)) == 1:
            # only one class present: fall back to one-class SVM
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        self.model.changed("surface")

    def decision_surface(self, cls):
        """Evaluate the classifier's decision function on a canvas grid."""
        delta = 1
        x = np.arange(x_min, x_max + delta, delta)
        y = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(x, y)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z

    def clear_data(self):
        """Drop all training points and reset the fitted state."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")

    def add_example(self, x, y, label):
        """Append a labeled point and refresh the surface if fitted."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")

        # update decision surface if already fitted.
        self.refit()

    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Matplotlib/Tk view: draws training points and decision surfaces.

    Registered as an observer of Model; reacts to 'example_added',
    'clear' and 'surface' change events.
    """

    def __init__(self, root, controller):
        f = Figure()
        ax = f.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        canvas = FigureCanvasTkAgg(f, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = f
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        self.contours = []
        self.c_labels = None
        self.plot_kernels()

    def plot_kernels(self):
        # kernel formulas displayed below the canvas
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")

    def onclick(self, event):
        # left click -> positive example, right click -> negative example
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)

    def update_example(self, model, idx):
        # draw the point at data[idx]; white for +1, black for -1
        x, y, l = model.data[idx]
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)

    def update(self, event, model):
        """Observer callback: redraw according to the change event."""
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)

        if event == "example_added":
            self.update_example(model, -1)

        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()

        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)

        self.canvas.draw()

    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []

    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adds the circle collection
        to the contours list."""
        cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                             s=80, edgecolors="k", facecolors="none")
        self.contours.append(cs)

    def plot_decision_surface(self, surface, type):
        X1, X2, Z = surface
        if type == 0:
            # plain margin contours at the -1/0/+1 decision values
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            # filled surface plus the zero-level decision boundary
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Tk control bar: kernel selection, hyper-parameter entries, buttons.

    Side effect: attaches the complexity/gamma/degree/coef0 StringVars to
    the controller, which reads them in Controller.fit().
    """

    def __init__(self, root, controller):
        fm = Tk.Frame(root)
        # kernel radio buttons (values match Controller's kernel_map)
        kernel_group = Tk.Frame(fm)
        Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
                       value=0, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
                       value=1, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
                       value=2, command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)

        # hyper-parameter entry fields
        valbox = Tk.Frame(fm)
        controller.complexity = Tk.StringVar()
        controller.complexity.set("1.0")
        c = Tk.Frame(valbox)
        Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
            side=Tk.LEFT)
        c.pack()

        controller.gamma = Tk.StringVar()
        controller.gamma.set("0.01")
        g = Tk.Frame(valbox)
        Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
        g.pack()

        controller.degree = Tk.StringVar()
        controller.degree.set("3")
        d = Tk.Frame(valbox)
        Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
        d.pack()

        controller.coef0 = Tk.StringVar()
        controller.coef0.set("0")
        r = Tk.Frame(valbox)
        Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
        r.pack()
        valbox.pack(side=Tk.LEFT)

        # surface rendering mode radio buttons
        cmap_group = Tk.Frame(fm)
        Tk.Radiobutton(cmap_group, text="Hyperplanes",
                       variable=controller.surface_type, value=0,
                       command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(cmap_group, text="Surface",
                       variable=controller.surface_type, value=1,
                       command=controller.refit).pack(anchor=Tk.W)

        cmap_group.pack(side=Tk.LEFT)

        train_button = Tk.Button(fm, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        fm.pack(side=Tk.LEFT)
        Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Build the command-line option parser for this script."""
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--output",
                      action="store", type="str", dest="output",
                      help="Path where to dump data.")
    return parser
def main(argv):
    """Wire up model/view/controller, run the GUI, then optionally dump data."""
    op = get_parser()
    opts, args = op.parse_args(argv[1:])
    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    view = View(root, controller)
    model.add_observer(view)
    Tk.mainloop()

    # mainloop() blocks; the dump happens after the window is closed.
    if opts.output:
        model.dump_svmlight_file(opts.output)


if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause |
MartinSavc/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
# Echo the module docstring (example description) when the script runs.
print(__doc__)

# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines

# initialize random generator for reproducible shuffles below
np.random.seed(0)
def generate_data(case, sparse=False):
    """Generate regression/classification data.

    ``case`` is 'regression' (Boston housing) or 'classification'
    (vectorized 20 newsgroups, downloaded on first use). Returns a dict
    with an 80/20 shuffled train/test split; with ``sparse=True`` the
    feature matrices are CSR.
    """
    bunch = None
    if case == 'regression':
        bunch = datasets.load_boston()
    elif case == 'classification':
        bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    X, y = shuffle(bunch.data, bunch.target)
    offset = int(X.shape[0] * 0.8)
    X_train, y_train = X[:offset], y[:offset]
    X_test, y_test = X[offset:], y[offset:]
    if sparse:
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    else:
        X_train = np.array(X_train)
        X_test = np.array(X_test)
    y_test = np.array(y_test)
    y_train = np.array(y_train)
    data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
            'y_test': y_test}
    return data
def benchmark_influence(conf):
    """
    Benchmark influence of :changing_param: on both MSE and latency.

    ``conf`` is one of the configuration dicts defined at the bottom of the
    file; it supplies the estimator class, its fixed and varying parameters,
    the data, and the scoring/complexity callables.
    """
    prediction_times = []
    prediction_powers = []
    complexities = []
    for param_value in conf['changing_param_values']:
        conf['tuned_params'][conf['changing_param']] = param_value
        estimator = conf['estimator'](**conf['tuned_params'])
        print("Benchmarking %s" % estimator)
        estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
        conf['postfit_hook'](estimator)
        complexity = conf['complexity_computer'](estimator)
        complexities.append(complexity)
        # average prediction latency over n_samples repeated calls
        start_time = time.time()
        for _ in range(conf['n_samples']):
            y_pred = estimator.predict(conf['data']['X_test'])
        elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
        prediction_times.append(elapsed_time)
        pred_score = conf['prediction_performance_computer'](
            conf['data']['y_test'], y_pred)
        prediction_powers.append(pred_score)
        print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
            complexity, conf['prediction_performance_label'], pred_score,
            elapsed_time))
    return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
    """
    Plot influence of model complexity on both accuracy and latency.

    Draws a single figure with two y axes sharing one x axis: the left
    axis shows the prediction error (`mse_values`) and the right axis the
    prediction latency (`prediction_times`), both as functions of the
    model complexities in `complexities`.
    """
    plt.figure(figsize=(12, 6))
    # Host/parasite axes pair: the parasite axis (par1) shares the x axis
    # of the host but keeps an independent y axis for latency.
    host = host_subplot(111, axes_class=Axes)
    plt.subplots_adjust(right=0.75)
    par1 = host.twinx()
    host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
    y1_label = conf['prediction_performance_label']
    y2_label = "Time (s)"
    host.set_ylabel(y1_label)
    par1.set_ylabel(y2_label)
    p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
    p2, = par1.plot(complexities, prediction_times, 'r-',
                    label="latency")
    host.legend(loc='upper right')
    # Color each axis label to match its curve so the two y axes are easy
    # to tell apart.
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
    plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code

# Build the benchmark datasets once and share them across configurations.
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)

# Each configuration describes one estimator to benchmark: which parameter
# to vary, how to measure model complexity, and which metric scores the
# predictions.
configurations = [
    {'estimator': SGDClassifier,
     'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
                      'modified_huber', 'fit_intercept': True},
     'changing_param': 'l1_ratio',
     'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non_zero coefficients',
     'complexity_computer': _count_nonzero_coefficients,
     'prediction_performance_computer': hamming_loss,
     'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
     # sparsify() converts the coefficient matrix to sparse form after fit.
     'postfit_hook': lambda x: x.sparsify(),
     'data': classification_data,
     'n_samples': 30},
    {'estimator': NuSVR,
     'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
     'changing_param': 'nu',
     'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'n_support_vectors',
     'complexity_computer': lambda x: len(x.support_vectors_),
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
    {'estimator': GradientBoostingRegressor,
     'tuned_params': {'loss': 'ls'},
     'changing_param': 'n_estimators',
     'changing_param_values': [10, 50, 100, 200, 500],
     'complexity_label': 'n_trees',
     'complexity_computer': lambda x: x.n_estimators,
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
]

# Benchmark each configuration and plot its accuracy/latency trade-off.
for conf in configurations:
    prediction_performances, prediction_times, complexities = \
        benchmark_influence(conf)
    plot_influence(conf, prediction_performances, prediction_times,
                   complexities)
| bsd-3-clause |
flavour/Turkey | modules/s3chart.py | 16 | 10983 | # -*- coding: utf-8 -*-
""" S3 Charting Toolkit
@copyright: 2011-15 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{NumPy}} <http://www.numpy.org>}
@requires: U{B{I{MatPlotLib}} <http://matplotlib.sourceforge.net>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Chart"]
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import current
from gluon.storage import Storage
from gluon.html import IMG
# =============================================================================
class S3Chart(object):
    """
    Module for graphing

    Currently a simple wrapper to matplotlib
    """

    # This folder needs to be writable by the web2py process
    CACHE_PATH = "/%s/static/cache/chart" % current.request.application

    # -------------------------------------------------------------------------
    def __init__(self, path, width=9, height=6):
        """
        Create the base Figure object

        @param path: file name stem used to locate the cached chart image
        @param width: x100px
        @param height: x100px
        """
        try:
            # Causes deadlocking issues
            # http://sjohannes.wordpress.com/2010/06/11/using-matplotlib-in-a-web-application/
            #import matplotlib
            #matplotlib.use("Agg")
            #import matplotlib.pyplot as plt
            #from pylab import savefig
            from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
            self.FigureCanvas = FigureCanvas
            from matplotlib.figure import Figure
            self.Figure = Figure
            MATPLOTLIB = True
        except ImportError:
            import sys
            print >> sys.stderr, "WARNING: S3Chart unresolved dependency: matplotlib required for charting"
            MATPLOTLIB = False

        self.filename = path
        self.width = width
        self.height = height
        # If True, histogram x-axis tick labels are computed as integers
        self.asInt = False
        if MATPLOTLIB:
            self.fig = Figure(figsize=(width, height))
        else:
            # Chart methods check for this and degrade gracefully
            self.fig = None

    # -------------------------------------------------------------------------
    @staticmethod
    def getCachedPath(filename):
        """
        Return the web-relative path of the cached chart image, or None
        if no cached image exists for this filename
        """
        import os
        path = "applications"
        chartFile = "%s/%s.png" % (S3Chart.CACHE_PATH, filename)
        fullPath = "%s%s" % (path, chartFile)
        if os.path.exists(fullPath):
            return chartFile
        else:
            return None

    # -------------------------------------------------------------------------
    @staticmethod
    def getCachedFile(filename):
        """
        Return the contents of the cached file, if the file can't be
        found or read then return None
        """
        chartFile = S3Chart.getCachedPath(filename)
        if chartFile:
            # Open the same filesystem path whose existence getCachedPath
            # checked ("applications" + web path); previously the
            # web-relative path was opened, which always failed and fell
            # through to the except clause.
            fullPath = "applications%s" % chartFile
            try:
                f = open(fullPath)
                try:
                    return f.read()
                finally:
                    # always release the file handle (was previously leaked)
                    f.close()
            except:
                # for some reason been unable to get the cached version
                pass
        return None

    # -------------------------------------------------------------------------
    @staticmethod
    def storeCachedFile(filename, image):
        """
        Save the file in the cache area, and return the path to this file
        (None on failure)
        """
        path = "applications"
        chartFile = "%s/%s.png" % (S3Chart.CACHE_PATH, filename)
        fullPath = "%s%s" % (path, chartFile)
        try:
            f = open(fullPath, "w+")
            try:
                # NOTE(review): print appends a trailing newline after the
                # PNG data; kept for backward compatibility with existing
                # cached files - confirm before changing to f.write(image).
                print >> f, image
            finally:
                # always release the file handle (was previously leaked)
                f.close()
        except:
            return None
        return chartFile

    # -------------------------------------------------------------------------
    @staticmethod
    def purgeCache(prefix=None):
        """
        Delete the files in the cache that match the file name prefix,
        if the prefix is None then all files will be deleted
        """
        import os
        folder = "applications%s/" % S3Chart.CACHE_PATH
        if os.path.exists(folder):
            filelist = os.listdir(folder)
            for file in filelist:
                if prefix is None or file.startswith(prefix):
                    os.remove("%s%s" % (folder, file))

    # -------------------------------------------------------------------------
    def draw(self, output="xml"):
        """
        Output the chart as a PNG embedded in an IMG tag
            - used by the Delphi module
        """
        fig = self.fig
        if not fig:
            return "Matplotlib not installed"

        # For interactive shell tests
        #plt.show()
        # For web response
        #savefig(response.body)
        chart = Storage()
        chart.body = StringIO()
        chart.headers = Storage()
        chart.headers["Content-Type"] = "image/png"

        canvas = self.FigureCanvas(fig)
        canvas.print_figure(chart.body)
        #return response.body.getvalue()
        image = chart.body.getvalue()

        # IE 8 and before has a 32K limit on URIs this can be quickly
        # gobbled up if the image is too large. So the image will
        # stored on the server and a URI used in the src
        cachePath = self.storeCachedFile(self.filename, image)
        if output == "xml":
            if cachePath is not None:
                image = IMG(_src = cachePath)
            else:
                # Caching failed: fall back to an inline data URI
                import base64
                base64Img = base64.b64encode(image)
                image = IMG(_src="data:image/png;base64,%s" % base64Img)
        else:
            current.response.headers["Content-Type"] = "image/png"
        return image

    # -------------------------------------------------------------------------
    def survey_hist(self, title,
                    data, bins, min, max, xlabel=None, ylabel=None):
        """
        Draw a Histogram
            - used by the Survey module

        @param min: lower bound of the histogram range
        @param max: upper bound of the histogram range

        NOTE: the min/max parameter names shadow the Python builtins;
              they are kept for backward compatibility with callers.
        """
        fig = self.fig
        if not fig:
            return "Matplotlib not installed"
        from numpy import arange
        # Draw a histogram
        ax = fig.add_subplot(111)
        ax.hist(data, bins=bins, range=(min, max))
        # Place one tick at each bin boundary
        left = arange(0, bins + 1)
        if self.asInt:
            label = left * int(max / bins)
        else:
            label = left * max / bins
        ax.set_xticks(label)
        ax.set_xticklabels(label, rotation=30)
        ax.set_title(title)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)

    # -------------------------------------------------------------------------
    def survey_pie(self, title, data, label):
        """
        Draw a Pie Chart
            - used by the Survey module
        """
        fig = self.fig
        if not fig:
            return "Matplotlib not installed"
        # Draw a pie chart
        ax = fig.add_subplot(111)
        ax.pie(data, labels=label)
        ax.legend()
        ax.set_title(title)

    # -------------------------------------------------------------------------
    def survey_bar(self, title, data, labels, legendLabels):
        """
        Draw a Bar Chart
            - used by the Survey module

        @param data: a list of values, or a list of such lists (one per
                     bar series)
        @param legendLabels: labels for the bar series, or None for no
                             legend
        """
        barColourList = ["#F2D7A0", "#7B77A8", "#69889A", "#9D7B34"]
        barColourListExt = [(242, 215, 160),
                            (123, 118, 168),
                            (105, 136, 154),
                            (157, 123, 52)
                            ]
        fig = self.fig
        if not fig:
            return "Matplotlib not installed"
        from numpy import arange
        # Draw a bar chart
        # Normalize single-series input to a list of series
        if not isinstance(data[0], list):
            dataList = [data]
        else:
            dataList = data
        legendColCnt = 3
        cnt = len(labels)
        dcnt = len(dataList)
        lcnt = 0
        if legendLabels is not None:
            # Number of legend rows, rounded up
            lcnt = (len(legendLabels) + legendColCnt - 1) / legendColCnt
        width = 0.9 / dcnt
        offset = 0
        gap = 0.1 / dcnt
        bcnt = 0
        bars = []
        # Shrink the plot area to leave room for the legend rows
        height = max(0.2, 0.85 - (0.04 * lcnt))
        rect = [0.08, 0.08, 0.9, height]
        ax = fig.add_axes(rect)
        for data in dataList:
            left = arange(offset, cnt + offset) # the x locations for the bars
            if bcnt < 3:
                colour = barColourList[bcnt]
            else:
                # Derive extra colours by scaling the extended palette
                colour = []
                colourpart = barColourListExt[bcnt % 4]
                divisor = 256.0 - (32 * bcnt / 4)
                if divisor < 0.0:
                    divisor = divisor * -1
                for part in colourpart:
                    calc = part / divisor
                    while calc > 1.0:
                        calc -= 1
                    colour.append(calc)
            plot = ax.bar(left, data, width=width, color=colour)
            bars.append(plot[0])
            bcnt += 1
            offset += width + gap
        left = arange(cnt)
        lblAdjust = (1.0 - gap) * 0.5
        # Rotate x tick labels more steeply as the label count grows
        if cnt <= 3:
            angle = 0
        elif cnt <= 10:
            angle = -10
        elif cnt <= 20:
            angle = -30
        else:
            angle = -45
        ax.set_xticks(left + lblAdjust)
        try: # This function is only available with version 1.1 of matplotlib
            ax.set_xticklabels(labels, rotation=angle)
            ax.tick_params(labelsize=self.width)
        except AttributeError:
            # Older matplotlib: truncate long labels instead of rotating
            newlabels = []
            for label in labels:
                if len(label) > 12:
                    label = label[0:10] + "..."
                newlabels.append(label)
            ax.set_xticklabels(newlabels)
        ax.set_title(title)
        if legendLabels is not None:
            fig.legend(bars,
                       legendLabels,
                       "upper left",
                       mode="expand",
                       ncol = legendColCnt,
                       prop={"size":10},
                       )
# END =========================================================================
| mit |
HaroldMills/Vesper | scripts/detector_eval/manual/plot_precision_vs_calls.py | 1 | 25972 | """
Script that plots precision vs. number of calls for several detectors.
The plots are made for the archive of the current working directory
using the "Classification" and "Detector Score" annotations created
by the detectors.
"""
from pathlib import Path
import itertools
import sqlite3
from bokeh.models import Range1d
from bokeh.models.tickers import SingleIntervalTicker
from bokeh.plotting import figure, output_file, show
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
CREATE_MATPLOTLIB_PLOTS = True
CREATE_BOKEH_PLOTS = False
CREATE_SEPARATE_STATION_NIGHT_PLOTS = False
DATABASE_FILE_NAME = 'Archive Database.sqlite'
# PLOTS_DIR_PATH = Path(
# '/Users/harold/Desktop/NFC/Data/MPG Ranch/2018/Detector Comparison/'
# '0.0/Plots')
PLOTS_DIR_PATH = Path('/Users/harold/Desktop/Plots')
MATPLOTLIB_PLOT_FILE_NAME = 'Detector Precision vs. Calls.pdf'
BOKEH_PLOT_FILE_NAME_FORMAT = '{}_{}.html'
ALL_STATION_NIGHTS_PLOT_FILE_NAME = 'All Station-Nights.html'
# DETECTOR_NAMES = [
# 'MPG Ranch Tseep Detector 0.0 40',
# 'MPG Ranch Thrush Detector 0.0 40',
# 'BirdVoxDetect 0.1.a0 AT 02',
# 'BirdVoxDetect 0.1.a0 AT 05'
# ]
DETECTOR_NAMES = [
'MPG Ranch Thrush Detector 1.0 40',
'MPG Ranch Tseep Detector 1.0 20',
]
PLOT_LINE_DATA = {
# 'MPG Ranch Tseep 0.0': ('MPG Ranch Tseep Detector 0.0 40', 'blue'),
# 'MPG Ranch Thrush 0.0': ('MPG Ranch Thrush Detector 0.0 40', 'green'),
'MPG Ranch Thrush 1.0': ('MPG Ranch Thrush Detector 1.0 40', 'green'),
'MPG Ranch Tseep 1.0': ('MPG Ranch Tseep Detector 1.0 20', 'blue'),
# Combination of MPG Ranch Tseep and Thrush detectors. It is a little
# unfair to the detectors to sum their counts, since it effectively
# forces them to share a threshold that would otherwise be chosen
# independently for the different detectors to optimize their
# performance, but the summation yields a precision-calls curve that
# is more directly comparable to that of BirdVoxDetect.
# 'MPG Ranch Combined 0.0': (
# ('MPG Ranch Tseep Detector 0.0 40',
# 'MPG Ranch Thrush Detector 0.0 40'), 'red'),
# This accommodates the fact that we used two different thresholds
# when we ran BirdVoxDetect on a set of August, 2019 MPG Ranch
# recordings. We ran the detector with a threshold of 2 on some
# of the recordings, and with a threshold of 5 on the others. In
# a case like this, in which exactly one of two detectors was run
# on a given recording, summing counts from the two detectors for
# a recording yields the counts of whichever detector was run on
# that recording, since the counts for the detector that wasn't
# run are all zero and contribute nothing to the sum.
# 'BirdVoxDetect 0.1.a0 AT': (
# ('BirdVoxDetect 0.1.a0 AT 02',
# 'BirdVoxDetect 0.1.a0 AT 05'), 'black'),
}
OLD_BIRD_DETECTOR_NAMES = [
'Old Bird Thrush Detector Redux 1.1',
'Old Bird Tseep Detector Redux 1.1',
]
OLD_BIRD_PLOT_DATA = {
'Old Bird Thrush Redux 1.1':
('Old Bird Thrush Detector Redux 1.1', 'green'),
'Old Bird Tseep Redux 1.1':
('Old Bird Tseep Detector Redux 1.1', 'blue'),
}
ARCHIVE_NAMES = ['Part 1', 'Part 2']
ARCHIVE_INFOS = {
'Part 1': (
Path(
'/Users/harold/Desktop/NFC/Data/MPG Ranch/'
'2019-07 Detector Development/Evaluation Archives/2018 Part 1'),
# Station-nights for 2018 MPG Ranch August archive, from output of
# `scripts.detector_eval.manual.prune_recordings` script.
'''
Angel / 2018-08-17
Bear / 2018-08-09
Bell Crossing / 2018-08-01
Bivory / 2018-08-31
CB Ranch / 2018-08-18
Coki / 2018-08-12
Cricket / 2018-08-14
Darby High School PC / 2018-08-28
Dashiell / 2018-08-23
Deer Mountain Lookout / 2018-08-10
DonnaRae / 2018-08-04
Dreamcatcher / 2018-08-29
Esmerelda / 2018-08-28
Evander / 2018-08-25
Florence High School / 2018-08-17
Grandpa's Pond / 2018-08-30
Heron Crossing / 2018-08-15
IBO Lucky Peak / 2018-08-27
IBO River / 2018-08-23
JJ / 2018-08-11
KBK / 2018-08-10
Kate / 2018-08-18
Lee Metcalf NWR / 2018-08-19
Lilo / 2018-08-13
Lost Trail / 2018-08-05
MPG North / 2018-08-11
MPG Ranch Floodplain SM2 / 2018-08-20
MPG Ranch Ridge / 2018-08-23
MPG Ranch Sheep Camp / 2018-08-29
MPG Ranch Subdivision / 2018-08-18
MPG Ranch Zumwalt Ridge / 2018-08-20
Max / 2018-08-26
Meadowlark / 2018-08-08
Mickey / 2018-08-09
Mitzi / 2018-08-02
Molly / 2018-08-22
Oxbow / 2018-08-07
Panda / 2018-08-24
Petey / 2018-08-20
Pocket Gopher / 2018-08-16
Sadie-Kate / 2018-08-11
Sasquatch / 2018-08-19
Seeley High School / 2018-08-20
Sleeman / 2018-08-08
Slocum / 2018-08-24
St Mary Lookout / 2018-08-15
Sula Peak Lookout / 2018-08-31
Sula Ranger Station / 2018-08-31
Teller / 2018-08-13
Walnut / 2018-08-07
Willow Mountain Lookout / 2018-08-17
YVAS / 2018-08-02
Zuri / 2018-08-13
'''
),
'Part 2': (
Path(
'/Users/harold/Desktop/NFC/Data/MPG Ranch/'
'2019-07 Detector Development/Evaluation Archives/2018 Part 2'),
# Station-nights for 2018 MPG Ranch September archive, from output of
# `scripts.detector_eval.manual.prune_recordings` script.
'''
Angel / 2018-09-30
Bear / 2018-09-09
Bell Crossing / 2018-09-20
Bivory / 2018-09-05
CB Ranch / 2018-09-23
Coki / 2018-09-19
Cricket / 2018-09-12
Darby High School PC / 2018-09-11
Dashiell / 2018-09-11
Deer Mountain Lookout / 2018-09-16
DonnaRae / 2018-09-23
Dreamcatcher / 2018-09-25
Esmerelda / 2018-09-08
Evander / 2018-09-07
Florence High School / 2018-09-20
Grandpa's Pond / 2018-09-08
Heron Crossing / 2018-09-04
IBO Lucky Peak / 2018-09-13
IBO River / 2018-09-09
JJ / 2018-09-04
KBK / 2018-09-11
Kate / 2018-09-25
Lee Metcalf NWR / 2018-09-02
Lilo / 2018-09-12
Lost Trail / 2018-09-03
MPG North / 2018-09-12
MPG Ranch Floodplain / 2018-09-30
MPG Ranch Ridge / 2018-09-10
MPG Ranch Sheep Camp / 2018-09-14
MPG Ranch Subdivision / 2018-09-02
Max / 2018-09-20
Meadowlark / 2018-09-26
Mickey / 2018-09-14
Mitzi / 2018-09-06
Molly / 2018-09-24
Oxbow / 2018-09-09
Panda / 2018-09-08
Petey / 2018-09-12
Pocket Gopher / 2018-09-20
Sasquatch / 2018-09-30
Seeley High School / 2018-09-14
Sleeman / 2018-09-13
Slocum / 2018-09-10
St Mary Lookout / 2018-09-05
Sula Peak Lookout / 2018-09-03
Sula Ranger Station / 2018-09-14
Teller / 2018-09-07
Walnut / 2018-09-01
Willow Mountain Lookout / 2018-09-01
YVAS / 2018-09-18
Zuri / 2018-09-20
'''
)
}
NUM_SCORE_DECIMAL_PLACES = 2
QUERY_FORMAT = '''
select
cast({} * round(score.value, {}) as integer) as Score,
count(*) as Clips
from
vesper_clip as clip
inner join vesper_processor as processor
on clip.creating_processor_id = processor.id
inner join vesper_station as station
on clip.station_id = station.id
inner join vesper_string_annotation as score
on clip.id = score.clip_id
inner join vesper_annotation_info as score_info
on score.info_id = score_info.id
inner join vesper_string_annotation as classification
on clip.id = classification.clip_id
inner join vesper_annotation_info as classification_info
on classification.info_id = classification_info.id
where
processor.name = ? and
station.name = ? and
clip.date = ? and
score_info.name = 'Detector Score' and
classification_info.name = 'Classification' and
classification.value {}
group by Score;
'''
CALL_CLIPS_QUERY = QUERY_FORMAT.format(
10 ** NUM_SCORE_DECIMAL_PLACES, NUM_SCORE_DECIMAL_PLACES, "like 'Call%'")
NOISE_CLIPS_QUERY = QUERY_FORMAT.format(
10 ** NUM_SCORE_DECIMAL_PLACES, NUM_SCORE_DECIMAL_PLACES, "= 'Noise'")
# TODO: For each plot line, automatically plot to the lowest score for
# which relevant clips were not pruned from the archive database. Note
# that that score may vary with the plot line (detector(s) and
# station-night). I believe it is safe to use for each combination of
# detector, station, and night the lowest score that a clip for that
# combination has in the archive database, and that that score can be
# determined from the clip counts that we retrieve from the database.
MIN_PLOT_LINE_SCORES = {
'MPG Ranch Thrush 1.0': 40,
'MPG Ranch Tseep 1.0': 20,
# 46 for Part 1, 30 for Part 2, 46 for both
'BirdVoxDetect 0.1.a0 AT': 46
}
DEFAULT_MIN_PLOT_LINE_SCORE = 80
OLD_BIRD_QUERY_FORMAT = '''
select
count(*) as Clips
from
vesper_clip as clip
inner join vesper_processor as processor
on clip.creating_processor_id = processor.id
inner join vesper_station as station
on clip.station_id = station.id
inner join vesper_string_annotation as classification
on clip.id = classification.clip_id
inner join vesper_annotation_info as classification_info
on classification.info_id = classification_info.id
where
processor.name = ? and
station.name = ? and
clip.date = ? and
classification_info.name = 'Classification' and
classification.value {};
'''
OLD_BIRD_CALL_CLIPS_QUERY = OLD_BIRD_QUERY_FORMAT.format("like 'Call%'")
OLD_BIRD_NOISE_CLIPS_QUERY = OLD_BIRD_QUERY_FORMAT.format("= 'Noise'")
def main():
    """Gather clip counts and create the configured plots."""
    print('Getting clip counts...')
    # Cumulative per-score counts for the score-annotated detectors.
    clip_counts = get_clip_counts()
    # Single (call, noise) count pairs for the unscored Old Bird detectors.
    old_bird_clip_counts = get_old_bird_clip_counts()
    if CREATE_MATPLOTLIB_PLOTS:
        print('Creating Matplotlib plots...')
        create_matplotlib_plots(clip_counts, old_bird_clip_counts)
    if CREATE_BOKEH_PLOTS:
        print('Creating Bokeh plots...')
        create_bokeh_plots(clip_counts, old_bird_clip_counts)
    print('Done.')
def get_clip_counts():
    """Merge per-archive clip counts into one station-night dictionary."""
    merged = {}
    for archive_name in ARCHIVE_NAMES:
        merged.update(get_archive_clip_counts(archive_name))
    return merged
def get_archive_clip_counts(archive_name):
    """Get cumulative clip counts for every station-night of one archive."""
    archive_dir_path, station_nights_text = ARCHIVE_INFOS[archive_name]
    return {
        station_night: get_station_night_clip_counts(
            archive_dir_path, *station_night)
        for station_night in parse_station_nights(station_nights_text)}
def parse_station_nights(station_nights):
    """Parse "Station / YYYY-MM-DD" lines into (station, date) tuples."""
    lines = station_nights.strip().split('\n')
    return [tuple(line.strip().split(' / ')) for line in lines]
def get_station_night_clip_counts(archive_dir_path, station_name, date):
    """Get per-detector cumulative clip counts for one station-night."""
    return {
        detector_name: get_station_night_clip_counts_aux(
            archive_dir_path, detector_name, station_name, date)
        for detector_name in DETECTOR_NAMES}
def get_station_night_clip_counts_aux(
        archive_dir_path, detector_name, station_name, date):
    """Get (call_counts, noise_counts) cumulative arrays for one detector."""
    query_values = (detector_name, station_name, date)
    calls = get_cumulative_clip_counts(
        archive_dir_path, CALL_CLIPS_QUERY, query_values)
    noises = get_cumulative_clip_counts(
        archive_dir_path, NOISE_CLIPS_QUERY, query_values)
    return calls, noises
def get_cumulative_clip_counts(archive_dir_path, query, values):
    """
    Return a NumPy array of cumulative clip counts for one detector,
    station, and night.

    Element i of the returned array is the number of clips whose
    integerized detector scores are at least i.
    """
    db_file_path = archive_dir_path / DATABASE_FILE_NAME
    connection = sqlite3.connect(str(db_file_path))
    try:
        # `with connection` manages the transaction only; it does not
        # close the connection.
        with connection:
            rows = connection.execute(query, values)
            counts = create_clip_counts_array()
            for score, count in rows:
                counts[score] = count
    finally:
        # Close the connection even if the query raises, so connections
        # are not leaked across station-nights.
        connection.close()
    # Compute cumulative clip count sums so that element i of count
    # array is the number of clips whose scores are at least i.
    counts = np.flip(np.cumsum(np.flip(counts)))
    return counts
def create_clip_counts_array():
    """Return a zeroed int32 array with one slot per discrete score value."""
    num_scores = 10 ** (NUM_SCORE_DECIMAL_PLACES + 2) + 1
    return np.zeros(num_scores, dtype='int32')
def get_old_bird_clip_counts():
    """Merge per-archive Old Bird clip counts into one station-night dict."""
    merged = {}
    for archive_name in ARCHIVE_NAMES:
        merged.update(get_archive_old_bird_clip_counts(archive_name))
    return merged
def get_archive_old_bird_clip_counts(archive_name):
    """Get Old Bird clip counts for every station-night of one archive."""
    archive_dir_path, station_nights_text = ARCHIVE_INFOS[archive_name]
    return {
        station_night: get_old_bird_station_night_clip_counts(
            archive_dir_path, *station_night)
        for station_night in parse_station_nights(station_nights_text)}
def get_old_bird_station_night_clip_counts(
        archive_dir_path, station_name, date):
    """Get per-detector Old Bird clip counts for one station-night."""
    return {
        detector_name: get_old_bird_station_night_clip_counts_aux(
            archive_dir_path, detector_name, station_name, date)
        for detector_name in OLD_BIRD_DETECTOR_NAMES}
def get_old_bird_station_night_clip_counts_aux(
        archive_dir_path, detector_name, station_name, date):
    """Get a (call_count, noise_count) pair for one Old Bird detector."""
    query_values = (detector_name, station_name, date)
    calls = get_old_bird_clip_count(
        archive_dir_path, OLD_BIRD_CALL_CLIPS_QUERY, query_values)
    noises = get_old_bird_clip_count(
        archive_dir_path, OLD_BIRD_NOISE_CLIPS_QUERY, query_values)
    return calls, noises
def get_old_bird_clip_count(archive_dir_path, query, values):
    """
    Return the single clip count produced by `query` for one Old Bird
    detector, station, and night.
    """
    db_file_path = archive_dir_path / DATABASE_FILE_NAME
    connection = sqlite3.connect(str(db_file_path))
    try:
        with connection:
            rows = connection.execute(query, values)
            # The aggregate query always yields exactly one row.
            count = rows.fetchone()[0]
    finally:
        # Close the connection even if the query raises, so connections
        # are not leaked across station-nights.
        connection.close()
    return count
def create_matplotlib_plots(clip_counts, old_bird_clip_counts):
    """Write the Matplotlib precision-vs-calls plots to a single PDF file."""
    # Aggregate counts over all station-nights for the summary plot.
    summed_clip_counts = sum_clip_counts(clip_counts)
    summed_old_bird_clip_counts = \
        sum_old_bird_clip_counts(old_bird_clip_counts)
    file_path = PLOTS_DIR_PATH / MATPLOTLIB_PLOT_FILE_NAME
    with PdfPages(file_path) as pdf:
        # First page: one plot aggregating all station-nights.
        create_matplotlib_plot(
            pdf, 'All Station-Nights', summed_clip_counts,
            summed_old_bird_clip_counts)
        if CREATE_SEPARATE_STATION_NIGHT_PLOTS:
            # One additional page per station-night.
            station_nights = sorted(clip_counts.keys())
            for station_night in station_nights:
                title = '{} / {}'.format(*station_night)
                counts = clip_counts[station_night]
                old_bird_counts = old_bird_clip_counts[station_night]
                create_matplotlib_plot(pdf, title, counts, old_bird_counts)
def sum_clip_counts(clip_counts):
    """Sum call/noise count arrays over all station-nights, per detector.

    Station-nights that lack counts for a detector are simply skipped.
    """
    summed = {}
    for station_night_counts in clip_counts.values():
        for detector_name in DETECTOR_NAMES:
            if detector_name not in station_night_counts:
                continue
            call_counts, noise_counts = station_night_counts[detector_name]
            prev_calls, prev_noises = summed.get(
                detector_name,
                (create_clip_counts_array(), create_clip_counts_array()))
            summed[detector_name] = (
                prev_calls + call_counts, prev_noises + noise_counts)
    return summed
def sum_old_bird_clip_counts(clip_counts):
    """Sum Old Bird clip counts over all station-nights, per detector."""
    return {
        detector_name:
            sum_old_bird_clip_counts_aux(detector_name, clip_counts)
        for detector_name in OLD_BIRD_DETECTOR_NAMES}
def sum_old_bird_clip_counts_aux(detector_name, clip_counts):
    """Sum one detector's (call, noise) count pairs across station-nights."""
    call_counts = []
    noise_counts = []
    for station_night_counts in clip_counts.values():
        calls, noises = station_night_counts[detector_name]
        call_counts.append(calls)
        noise_counts.append(noises)
    return sum_counts(call_counts), sum_counts(noise_counts)
def sum_counts(counts):
    """Return the sum of all elements of `counts` as a NumPy scalar."""
    return np.sum(np.asarray(counts))
def create_matplotlib_plot(pdf, title, clip_counts, old_bird_clip_counts):
    """Add one precision-vs-calls page to the open `PdfPages` object."""
    plt.figure(figsize=(6, 6))
    axes = plt.gca()
    # Create plot lines.
    for line_name, (detector_names, line_color) in PLOT_LINE_DATA.items():
        create_matplotlib_plot_line(
            axes, line_name, detector_names, line_color, clip_counts)
    # Create Old Bird markers.
    for marker_name, (detector_name, marker_color) in \
            OLD_BIRD_PLOT_DATA.items():
        create_matplotlib_plot_marker(
            axes, marker_name, detector_name, marker_color,
            old_bird_clip_counts)
    # Set title and axis labels.
    plt.title(title)
    plt.xlabel('Calls')
    plt.ylabel('Precision (%)')
    # Set axis limits.
    plt.ylim((0, 100))
    # Configure grid: major lines every 20 percent, minor every 5.
    major_locator = MultipleLocator(20)
    minor_locator = MultipleLocator(5)
    axes.yaxis.set_major_locator(major_locator)
    axes.yaxis.set_minor_locator(minor_locator)
    plt.grid(which='both')
    plt.grid(which='minor', alpha=.4)
    # Show legend.
    axes.legend(prop={'size': 8})
    # axes.legend(prop={'size': 8}, loc=(.04, .13))
    pdf.savefig()
    plt.close()
def create_matplotlib_plot_line(
        axes, line_name, detector_names, line_color, clip_counts):
    """Add one precision-vs-calls curve to `axes`, if data are available."""
    data = get_plot_line_data(line_name, detector_names, clip_counts)
    if data is None:
        return
    call_counts, precisions = data
    axes.plot(call_counts, precisions, color=line_color, label=line_name)
def get_plot_line_data(line_name, detector_names, clip_counts):
    """
    Return (call_counts, precisions) arrays for one plot line, or `None`
    if the relevant clip counts are unavailable or empty.

    Also prints a precision-vs-threshold table for the line as a side
    effect.
    """
    try:
        call_counts, noise_counts = \
            get_plot_line_clip_counts(detector_names, clip_counts)
    except ValueError:
        return None
    total_counts = call_counts + noise_counts
    if total_counts[0] == 0:
        # no clips for this detector
        return None
    # Thin the arrays to one point per integer score below 99; higher
    # resolution is retained between 99 and 100.
    scores, call_counts = reduce_size(line_name, call_counts)
    _, total_counts = reduce_size(line_name, total_counts)
    # Trim counts as needed to avoid divides by zero in precision
    # computations.
    indices = np.where(total_counts == 0)[0]
    if len(indices) != 0:
        end = indices[0]
        call_counts = call_counts[:end]
        total_counts = total_counts[:end]
    precisions = 100 * call_counts / total_counts.astype('float')
    show_precision_table(line_name, scores, precisions)
    return call_counts, precisions
def show_precision_table(line_name, scores, precisions):
    """Print a precision-vs-score-threshold table for one plot line."""
    print(line_name, 'precision vs. threshold:')
    min_score = \
        MIN_PLOT_LINE_SCORES.get(line_name, DEFAULT_MIN_PLOT_LINE_SCORE)
    num_scores = 100 - min_score
    # One "score,precision" line per row.
    f = '{:.0f},{:.1f}'
    # Show precision for scores from `min_score` through 99.
    for i in range(num_scores):
        print(f.format(scores[i], precisions[i]))
    # Show precision for score of 100.
    print(f.format(scores[-1], precisions[-1]))
def get_plot_line_clip_counts(detector_names, clip_counts):
    """
    Return a (call_counts, noise_counts) array pair for one plot line.

    `detector_names` is either a single detector name or a tuple of
    names; in the latter case the detectors' count arrays are summed.
    """
    if not isinstance(detector_names, tuple):
        # Single string detector name.
        return get_plot_line_clip_counts_aux(detector_names, clip_counts)
    # Tuple of detector names: sum the call arrays and the noise arrays.
    count_array_pairs = [
        get_plot_line_clip_counts_aux(name, clip_counts)
        for name in detector_names]
    call_arrays, noise_arrays = zip(*count_array_pairs)
    return (sum_arrays(call_arrays), sum_arrays(noise_arrays))
def get_plot_line_clip_counts_aux(detector_name, clip_counts):
    """Look up one detector's clip counts, raising ValueError if absent."""
    if detector_name not in clip_counts:
        raise ValueError(
            'Could not get clip counts for detector "{}".'.format(
                detector_name))
    return clip_counts[detector_name]
def sum_arrays(arrays):
    """Return the element-wise sum of a sequence of equal-shape arrays."""
    return np.sum(np.stack(arrays), axis=0)
def reduce_size(line_name, clip_counts):
    """
    Reduce the size of the specified clip counts by removing counts
    at non-integer scores below 99.

    For scores from the minimum to 99 a score resolution of 1 has
    been fine for (mostly) keeping our curves from looking like the
    piecewise linear approximations that they are. We need higher
    resolution between 99 and 100, however, to accomplish the same
    goal there.

    Returns a (scores, counts) pair of parallel arrays.
    """
    min_score = \
        MIN_PLOT_LINE_SCORES.get(line_name, DEFAULT_MIN_PLOT_LINE_SCORE)
    percent_size = 10 ** NUM_SCORE_DECIMAL_PLACES
    # Integer scores from min_score up to (but not including) 99 ...
    start = np.arange(min_score, 99, dtype='float64')
    # ... followed by every fractional score from 99 through 100.
    end = 99 + np.arange(percent_size + 1) / float(percent_size)
    scores = np.concatenate((start, end))
    m = min_score * percent_size
    n = 99 * percent_size
    # Take every `percent_size`-th count below 99, then all counts above.
    start = clip_counts[m:n:percent_size]
    end = clip_counts[n:]
    counts = np.concatenate((start, end))
    return scores, counts
def create_matplotlib_plot_marker(
        axes, marker_name, detector_name, marker_color, old_bird_clip_counts):
    """Add one Old Bird (calls, precision) point to `axes`."""
    call_count, noise_count = old_bird_clip_counts[detector_name]
    total = call_count + noise_count
    precision = 100 * call_count / total
    axes.scatter(call_count, precision, c=marker_color, label=marker_name)
def create_bokeh_plots(clip_counts, old_bird_clip_counts):
    """Write the Bokeh precision-vs-calls plots as HTML files."""
    # Aggregate counts over all station-nights for the summary plot.
    summed_clip_counts = sum_clip_counts(clip_counts)
    summed_old_bird_clip_counts = \
        sum_old_bird_clip_counts(old_bird_clip_counts)
    # Create plot for all station/nights.
    file_path = PLOTS_DIR_PATH / ALL_STATION_NIGHTS_PLOT_FILE_NAME
    create_bokeh_plot(
        file_path, 'All Station-Nights', summed_clip_counts,
        summed_old_bird_clip_counts)
    if CREATE_SEPARATE_STATION_NIGHT_PLOTS:
        # One additional HTML file per station-night.
        station_nights = sorted(clip_counts.keys())
        for station_night in station_nights:
            file_path = create_bokeh_plot_file_path(*station_night)
            title = '{} / {}'.format(*station_night)
            counts = clip_counts[station_night]
            old_bird_counts = old_bird_clip_counts[station_night]
            create_bokeh_plot(file_path, title, counts, old_bird_counts)
def create_bokeh_plot(file_path, title, clip_counts, old_bird_clip_counts):
    """Write one precision-vs-calls Bokeh plot to the specified HTML file."""
    output_file(file_path)
    tools = 'save'
    # tools = 'hover,save,pan,box_zoom,reset,wheel_zoom'
    p = figure(plot_width=700, plot_height=700, tools=tools)
    # Create plot lines.
    for line_name, (detector_names, line_color) in PLOT_LINE_DATA.items():
        create_bokeh_plot_line(
            p, line_name, detector_names, line_color, clip_counts)
    # Create Old Bird markers.
    for marker_name, (detector_name, marker_color) in \
            OLD_BIRD_PLOT_DATA.items():
        create_bokeh_plot_marker(
            p, marker_name, detector_name, marker_color, old_bird_clip_counts)
    p.title.text = title
    p.title.text_font_size = '12pt'
    p.axis.major_tick_line_color = None
    p.axis.minor_tick_line_color = None
    p.xaxis.axis_label = 'Calls'
    # Precision axis runs 0-100 percent with major ticks every 20.
    p.y_range = Range1d(0, 100)
    ticker = SingleIntervalTicker(interval=20, num_minor_ticks=4)
    p.yaxis.axis_label = 'Precision (%)'
    p.yaxis.ticker = ticker
    grid_line_color = 'black'
    grid_line_alpha = .3
    p.xgrid.grid_line_color = grid_line_color
    p.xgrid.grid_line_alpha = grid_line_alpha
    p.ygrid.ticker = ticker
    p.ygrid.grid_line_color = grid_line_color
    p.ygrid.grid_line_alpha = grid_line_alpha
    p.ygrid.minor_grid_line_color = grid_line_color
    p.ygrid.minor_grid_line_alpha = .1
    p.legend.location = 'top_right'
    p.legend.margin = 0
    p.legend.label_text_font_size = '8pt'
    show(p)
def create_bokeh_plot_line(
        p, line_name, detector_names, line_color, clip_counts):
    """Add one precision-vs-calls line to figure `p`, if data exist.

    When get_plot_line_data finds no data for the detectors, nothing is
    drawn.
    """
    line_data = get_plot_line_data(line_name, detector_names, clip_counts)
    if line_data is None:
        return
    xs, ys = line_data
    p.line(xs, ys, legend=line_name, line_color=line_color, line_width=2)
def create_bokeh_plot_marker(
        p, marker_name, detector_name, marker_color, old_bird_clip_counts):
    """Add one circle marker showing an Old Bird detector's precision.

    The x coordinate is the detector's call count, the y coordinate its
    precision in percent.
    """
    calls, noises = old_bird_clip_counts[detector_name]
    precision_pct = 100 * calls / (calls + noises)
    p.circle(calls, precision_pct, size=10, color=marker_color,
             legend=marker_name)
def create_bokeh_plot_file_path(station_name, date):
    """Build the output path for one station-night's plot file."""
    plot_file_name = BOKEH_PLOT_FILE_NAME_FORMAT.format(station_name, date)
    return PLOTS_DIR_PATH / plot_file_name
# Script entry point: build the plots only when run directly, not on import.
if __name__ == '__main__':
    main()
| mit |
ElDeveloper/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 22 | 1848 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.tree import DecisionTreeRegressor

# Create a random dataset: one feature, two correlated noisy targets
# (points on a circle of radius pi).
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))  # perturb every fifth sample

# Fit one regression tree per depth and predict on a dense grid.
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
depths = (2, 5, 8)
predictions = []
for depth in depths:
    model = DecisionTreeRegressor(max_depth=depth)
    model.fit(X, y)
    predictions.append(model.predict(X_test))

# Plot the training data and each model's predictions in the target plane.
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
for y_hat, color, depth in zip(predictions,
                               ("cornflowerblue", "c", "orange"), depths):
    plt.scatter(y_hat[:, 0], y_hat[:, 1], c=color, s=s,
                label="max_depth=%d" % depth)
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
thunderhoser/GewitterGefahr | setup.py | 1 | 2199 | """Setup file for GewitterGefahr."""
from setuptools import setup
# Sub-packages shipped with the distribution.
PACKAGE_NAMES = [
    'gewittergefahr', 'gewittergefahr.gg_io', 'gewittergefahr.gg_utils',
    'gewittergefahr.deep_learning', 'gewittergefahr.plotting',
    'gewittergefahr.scripts', 'gewittergefahr.feature_selection_example',
    'gewittergefahr.nature2019'
]

# PyPI search keywords.
KEYWORDS = [
    'machine learning', 'deep learning', 'artificial intelligence',
    'data mining', 'weather', 'meteorology', 'thunderstorm', 'wind', 'tornado'
]

SHORT_DESCRIPTION = (
    'End-to-end machine-learning library for predicting thunderstorm hazards.')

LONG_DESCRIPTION = (
    'GewitterGefahr is an end-to-end machine-learning library for predicting '
    'thunderstorm hazards, primarily tornadoes and damaging straight-line wind.'
)

# Trove classifiers (see https://pypi.org/classifiers/).
CLASSIFIERS = [
    'Development Status :: 2 - Pre-Alpha',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3'
]

# You also need to install the following packages, which are not available in
# pip. They can both be installed by "git clone" and "python setup.py install",
# the normal way one installs a GitHub package.
#
# https://github.com/matplotlib/basemap
# https://github.com/sharppy/SHARPpy
# https://github.com/tkrajina/srtm.py
PACKAGE_REQUIREMENTS = [
    'numpy',
    'scipy',
    'roipoly',
    'tensorflow',
    'keras',
    'scikit-learn',
    'scikit-image',
    'netCDF4',
    'pyproj<3.0',
    'opencv-python',
    'matplotlib',
    'pandas',
    'shapely',
    'descartes',
    'geopy',
    'metpy',
    # 'basemap',
    # 'python-srtm'
]

if __name__ == '__main__':
    setup(name='GewitterGefahr',
          version='0.1',
          description=SHORT_DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          license='MIT',
          author='Ryan Lagerquist',
          author_email='ryan.lagerquist@ou.edu',
          url='https://github.com/thunderhoser/GewitterGefahr',
          packages=PACKAGE_NAMES,
          scripts=[],
          keywords=KEYWORDS,
          classifiers=CLASSIFIERS,
          include_package_data=True,
          zip_safe=False,
          install_requires=PACKAGE_REQUIREMENTS)
| mit |
mattcaldwell/zipline | zipline/utils/cli.py | 4 | 6275 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
# Pygments is optional: it is used only to syntax-highlight algorithm source
# when printing it to the terminal (see run_pipeline).  Catch ImportError
# specifically -- the original bare `except:` would also have hidden
# unrelated errors raised during pygments' own import.
try:
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import TerminalFormatter
    PYGMENTS = True
except ImportError:
    # Fall back to plain-text printing when pygments is not installed.
    PYGMENTS = False
import zipline
# Fallback CLI settings; overridden first by a config file (-c) and then by
# explicit command-line flags (see parse_args).
DEFAULTS = {
    'start': '2012-01-01',
    'end': '2012-12-31',
    'data_frequency': 'daily',
    'capital_base': '10e6',
    'source': 'yahoo',
    'symbols': 'AAPL'
}
def parse_args(argv, ipython_mode=False):
    """Parse list of arguments.

    If a config file is provided (via -c), it will read in the
    supplied options and overwrite any global defaults.

    All other directly supplied arguments will overwrite the config
    file settings.

    Arguments:
        * argv : list of strings
            List of arguments, e.g. ['-c', 'my.conf']
        * ipython_mode : bool <default=False>
            Whether to parse IPython specific arguments
            like --local_namespace

    Returns:
        dict mapping option names to their parsed values.

    Notes:
        Default settings can be found in zipline.utils.cli.DEFAULTS.
    """
    # First pass: only look for -c/--conf_file so that config-file values
    # can be merged into the defaults before the full parse.  add_help=False
    # keeps this pass from consuming -h and printing a partial help message.
    conf_parser = argparse.ArgumentParser(
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Turn off help, so we print all options in response to -h
        add_help=False
    )
    conf_parser.add_argument("-c", "--conf_file",
                             help="Specify config file",
                             metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(argv)

    # Precedence: hard-coded DEFAULTS < config file [Defaults] section <
    # explicit command-line flags.
    defaults = copy(DEFAULTS)
    if args.conf_file:
        # NOTE(review): SafeConfigParser is a deprecated alias of
        # ConfigParser on Python 3; kept here for Python 2 compatibility.
        config = configparser.SafeConfigParser()
        config.read([args.conf_file])
        defaults.update(dict(config.items("Defaults")))

    # Second pass: the full parser inherits -c via parents= and applies the
    # merged defaults, so this pass handles -h with the complete option set.
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        description="Zipline version %s." % zipline.__version__,
        parents=[conf_parser]
    )
    parser.set_defaults(**defaults)
    parser.add_argument('--algofile', '-f')
    parser.add_argument('--data-frequency',
                        choices=('minute', 'daily'))
    parser.add_argument('--start', '-s')
    parser.add_argument('--end', '-e')
    parser.add_argument('--capital_base')
    parser.add_argument('--source', choices=('yahoo',))
    parser.add_argument('--symbols')
    parser.add_argument('--output', '-o')
    if ipython_mode:
        parser.add_argument('--local_namespace', action='store_true')

    args = parser.parse_args(remaining_argv)
    # `return` is a statement, not a function: drop the redundant parentheses.
    return vars(args)
def parse_cell_magic(line, cell):
    """Parse IPython cell magic and run the contained algorithm.

    `line` holds the magic's argument string (space-separated CLI-style
    flags); `cell` holds the algorithm source code to execute.
    """
    args_list = line.split(' ')
    args = parse_args(args_list, ipython_mode=True)
    # Pop so the flag is not forwarded to run_pipeline as a kwarg.
    local_namespace = args.pop('local_namespace', False)
    # By default, execute inside IPython namespace
    if not local_namespace:
        args['namespace'] = get_ipython().user_ns  # flake8: noqa
    # If we are running inside NB, do not output to file but create a
    # variable instead
    output_var_name = args.pop('output', None)
    perf = run_pipeline(print_algo=False, algo_text=cell, **args)
    if output_var_name is not None:
        get_ipython().user_ns[output_var_name] = perf  # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
    """Runs a full zipline pipeline given configuration keyword
    arguments.

    1. Load data (start and end dates can be provided as strings, as
    well as the source and symbols).

    2. Instantiate algorithm (supply either algo_text or algofile
    kwargs containing initialize() and handle_data() functions). If
    algofile is supplied, will try to look for algofile_analyze.py and
    append it.

    3. Run algorithm (supply capital_base as float).

    4. Return performance dataframe.

    :Arguments:
        * print_algo : bool <default=True>
           Whether to print the algorithm to command line. Will use
           pygments syntax coloring if pygments is found.
    """
    start = pd.Timestamp(kwargs['start'], tz='UTC')
    end = pd.Timestamp(kwargs['end'], tz='UTC')

    # 'symbols' is a comma-separated string, e.g. 'AAPL,MSFT'.
    symbols = kwargs['symbols'].split(',')

    if kwargs['source'] == 'yahoo':
        source = zipline.data.load_bars_from_yahoo(
            stocks=symbols, start=start, end=end)
    else:
        raise NotImplementedError(
            'Source %s not implemented.' % kwargs['source'])

    algo_text = kwargs.get('algo_text', None)
    if algo_text is None:
        # Expect algofile to be set
        algo_fname = kwargs['algofile']
        with open(algo_fname, 'r') as fd:
            algo_text = fd.read()
        # Optional companion file <algofile>_analyze.py is appended verbatim.
        analyze_fname = os.path.splitext(algo_fname)[0] + '_analyze.py'
        if os.path.exists(analyze_fname):
            with open(analyze_fname, 'r') as fd:
                # Simply append
                algo_text += fd.read()

    if print_algo:
        if PYGMENTS:
            highlight(algo_text, PythonLexer(), TerminalFormatter(),
                      outfile=sys.stdout)
        else:
            print_(algo_text)

    algo = zipline.TradingAlgorithm(script=algo_text,
                                    namespace=kwargs.get('namespace', {}),
                                    capital_base=float(kwargs['capital_base']),
                                    algo_filename=kwargs.get('algofile'))

    perf = algo.run(source)

    # Optionally persist the performance DataFrame as a pickle file.
    output_fname = kwargs.get('output', None)
    if output_fname is not None:
        perf.to_pickle(output_fname)

    return perf
| apache-2.0 |
ssalonen/financedatahoarder | financedatahoarder/services/tests/test_data_access_api.py | 1 | 15053 | from functools import partial
from itertools import chain
import pkg_resources
import grequests
from financedatahoarder.services import data_access_api
from financedatahoarder.services.data_access_api import NonCachingAsyncRequestsClient
from nose.tools import eq_
from nose_parameterized import parameterized
from datetime import datetime, date, timedelta
from mock import call, patch
import pandas as pd
import logging
def dummy_map(reqs, *args, **kwargs):
    """Stand-in for grequests.map: one placeholder response per request.

    Extra positional/keyword arguments are accepted and ignored so it can
    be used as a drop-in side_effect.
    """
    placeholder = object()
    return [placeholder for _ in reqs]
def _assert_equal_url_method_params_same(asyncresult_expected, asyncresult_actual):
    """Assert that url, method and request kwargs (params) are equal."""
    for attribute in ('url', 'method', 'kwargs'):
        eq_(getattr(asyncresult_expected, attribute),
            getattr(asyncresult_actual, attribute))
class DummyResponse(object):
    """Minimal stand-in for an HTTP response object.

    Exposes the attributes the code under test reads (``content``,
    ``status_code``, ``url``) and a no-op ``close``.
    """

    def __init__(self, content, status_code):
        self.url = 'dummy response url'
        self.status_code = status_code
        self.content = content

    def close(self):
        """Nothing to release."""
        return None
def _yield_test_query_key_stats_correct_http_requests_data():
    """Yield (date_interval, urls, base_replay_url, pool_size,
    expected_requests) parameter tuples for the request-building test."""
    # Exercise both datetime- and date-valued intervals over the same span.
    for date_interval in [(datetime(2015, 1, 1, 12, 0), datetime(2015, 1, 2, 12, 0)),
                          (date(2015, 1, 1), date(2015, 1, 2))]:
        # Second case checks that query-string parameters are split out of
        # the instrument URL into the request's params dict.
        for urls, expected_requests in [
                (['http://url1.com', 'http://url2.com', 'http://url3.com'],
                 [grequests.get('http://basehost.com/basepath/20150101/http://url1.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150101/http://url2.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150101/http://url3.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150102/http://url1.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150102/http://url2.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150102/http://url3.com', params={})]),
                (['http://url1.com', 'http://url2.com?id=2', 'http://url3.com?id=3'],
                 [grequests.get('http://basehost.com/basepath/20150101/http://url1.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150101/http://url2.com', params={'id': '2'}),
                  grequests.get('http://basehost.com/basepath/20150101/http://url3.com', params={'id': '3'}),
                  grequests.get('http://basehost.com/basepath/20150102/http://url1.com', params={}),
                  grequests.get('http://basehost.com/basepath/20150102/http://url2.com', params={'id': '2'}),
                  grequests.get('http://basehost.com/basepath/20150102/http://url3.com', params={'id': '3'})])
        ]:
            yield date_interval, urls, 'http://basehost.com/basepath/', 4, expected_requests
@parameterized(_yield_test_query_key_stats_correct_http_requests_data())
def test_query_key_stats_correct_http_requests(date_interval, urls, base_replay_url, grequests_pool_size,
                                               expected_requests):
    """query_key_stats must issue one replay GET per (instrument URL, day)."""
    client = NonCachingAsyncRequestsClient(base_replay_url, grequests_pool_size)
    with patch.object(data_access_api.grequests, 'map', side_effect=dummy_map) as grequests_map, \
            patch.object(client, '_cdx_list') as cdx_list, \
            patch.object(data_access_api, 'parse_overview_key_stats_from_responses'):
        # Fake CDX index: one snapshot URL per day for every instrument URL.
        cdx_list.return_value = {
            url: pd.Series(['http://basehost.com/basepath/{}/{}'.format(date.strftime('%Y%m%d'), url)
                            for date in pd.date_range(*date_interval)],
                           index=[date for date in pd.date_range(*date_interval)])
            for url in urls
        }
        _ = client.query_key_stats(date_interval, urls)
        cdx_list.assert_has_calls([call([url]) for url in urls])
        # eq_(len(grequests_map.call_args_list), len(urls))
        # Each grequests.map invocation receives exactly one positional arg
        # (the request list).
        for map_args, _ in grequests_map.call_args_list:
            eq_(len(map_args), 1)
        # number of requests should match the expected
        eq_(sum(len(map_args[0]) for map_args, _ in grequests_map.call_args_list),
            len(expected_requests))
        # Verify the actual requests
        for actual_requests_for_url, actual_kwargs in grequests_map.call_args_list:
            # Unpack argument-tuple
            actual_requests_for_url, = actual_requests_for_url
            eq_(actual_kwargs, {'size': grequests_pool_size})
            expected_requests_by_url = {r.url: r for r in expected_requests}
            for actual_request in actual_requests_for_url:
                expected_request = expected_requests_by_url[actual_request.url]
                _assert_equal_url_method_params_same(expected_request, actual_request)
def _dummy_url_from_id(id):
return 'http://dummyrequest.{id}.html'.format(id=id.replace('.html', ''))
def _dummy_url_from_filename(filename):
    """Map a canned test-data filename to its dummy instrument URL.

    The instrument id is whatever follows the last underscore in the
    filename (e.g. 'funds_snapshot_20150310_F0GBR04O2R.html').
    """
    instrument_id = filename.rpartition('_')[-1]
    return _dummy_url_from_id(instrument_id)
def _yield_test_query_key_stats_parsing_funds_http_200_data():
    """Yield (urls, response_filenames, expected_key_stats) test cases built
    from canned Morningstar fund/ETF/stock snapshot HTML files."""
    # Expected (value_date, value) parsed from each canned response file.
    url_to_expected_values = {
        'funds_snapshot_20150310_F0GBR04O2R.html': (pd.Timestamp('2015-03-09T00:00Z'), 6.65),
        'funds_snapshot_20150313_F0GBR04O2R.html': (pd.Timestamp('2015-03-13T00:00Z'), 6.74),
        'funds_snapshot_20150314_F0GBR04O2R.html': (pd.Timestamp('2015-03-13T00:00Z'), 6.74),
        'funds_snapshot_20150311_F0GBR04O2J.html': (pd.Timestamp('2015-03-10T00:00Z'), 2.51),
        'funds_snapshot_20150313_F0GBR04O2J.html': (pd.Timestamp('2015-03-12T00:00Z'), 2.51),
        'funds_snapshot_20150314_F0GBR04O2J.html': (pd.Timestamp('2015-03-13T00:00Z'), 2.51),
        'etf_snapshot_20150312_0P0000M7ZP.html': (pd.Timestamp('2015-03-12T00:00:00Z'), 116.18),
        'stock_20150320_knebv.html': (pd.Timestamp('2015-03-20T00:00Z'), 42.41)
    }

    def _return_test_data(ids_and_response_filenames, result_filenames):
        """The system will query all instruments defined in `ids_and_response_filenames` (instrument id to response).

        Assumption is that the requests are made with following order 1) instrument 2) date. This means that single
        instrument is queried first for all dates, before continuing with other instruments.

        result_filenames basically match entries of the key_stats list. Filenames are parsed to dict entries
        using `url_to_expected_values` above.
        """
        logger = logging.getLogger('_return_test_data')
        # Assert that test data is OK -- we should have equal number of
        # responses for every instrument.
        expected_len = None
        for instrument_id, response_filenames in ids_and_response_filenames:
            if expected_len is None:
                expected_len = len(response_filenames)
                continue
            else:
                assert len(response_filenames) == expected_len
        # we should query one instrument first, then proceed to next etc.
        response_filenames_flat_query_order = []
        for _, response_filenames in ids_and_response_filenames:
            response_filenames_flat_query_order.extend(response_filenames)
        urls = [_dummy_url_from_id(instrument_id) for instrument_id, _ in ids_and_response_filenames]
        logger.debug(urls)
        return urls, response_filenames_flat_query_order, [{'value_date': url_to_expected_values[result_filename][0],
                                                            'value': url_to_expected_values[result_filename][1],
                                                            'instrument_url': _dummy_url_from_filename(result_filename)}
                                                           for result_filename in result_filenames]

    #
    # Funds
    #
    # one fund, in time order
    yield _return_test_data([('F0GBR04O2R', ['funds_snapshot_20150310_F0GBR04O2R.html',
                                             'funds_snapshot_20150314_F0GBR04O2R.html'])],
                            ['funds_snapshot_20150310_F0GBR04O2R.html', 'funds_snapshot_20150314_F0GBR04O2R.html'])
    # Same output even if the responses do not follow the logical chrono order
    yield _return_test_data([('F0GBR04O2R', ['funds_snapshot_20150314_F0GBR04O2R.html',
                                             'funds_snapshot_20150310_F0GBR04O2R.html'])],
                            ['funds_snapshot_20150310_F0GBR04O2R.html', 'funds_snapshot_20150314_F0GBR04O2R.html'])
    # Two funds, three snapshots each; results interleave by value date.
    yield _return_test_data([('F0GBR04O2R', ['funds_snapshot_20150314_F0GBR04O2R.html',  # 2015-03-13
                                             'funds_snapshot_20150310_F0GBR04O2R.html',  # 2015-03-09
                                             'funds_snapshot_20150313_F0GBR04O2R.html',  # 2015-03-13
                                             ]),
                             ('F0GBR04O2J', ['funds_snapshot_20150311_F0GBR04O2J.html',  # 2015-03-10
                                             'funds_snapshot_20150313_F0GBR04O2J.html',  # 2015-03-12
                                             'funds_snapshot_20150314_F0GBR04O2J.html',  # 2015-03-13
                                             ]),
                             ],
                            [
                                # 2015-03-09
                                'funds_snapshot_20150310_F0GBR04O2R.html',
                                # 2015-03-10
                                'funds_snapshot_20150311_F0GBR04O2J.html',
                                # 2015-03-11
                                # 2015-03-12
                                'funds_snapshot_20150313_F0GBR04O2J.html',
                                # 2015-03-13, First R and then J due to query query order
                                'funds_snapshot_20150313_F0GBR04O2R.html',
                                'funds_snapshot_20150314_F0GBR04O2J.html',
                            ])
    # Otherwise same but different query order
    yield _return_test_data([('F0GBR04O2J', ['funds_snapshot_20150311_F0GBR04O2J.html',  # 2015-03-10
                                             'funds_snapshot_20150313_F0GBR04O2J.html',  # 2015-03-12
                                             'funds_snapshot_20150314_F0GBR04O2J.html',  # 2015-03-13
                                             ]),
                             ('F0GBR04O2R', ['funds_snapshot_20150314_F0GBR04O2R.html',  # 2015-03-13
                                             'funds_snapshot_20150310_F0GBR04O2R.html',  # 2015-03-09
                                             'funds_snapshot_20150313_F0GBR04O2R.html',  # 2015-03-13
                                             ]),
                             ],
                            [
                                # 2015-03-09
                                'funds_snapshot_20150310_F0GBR04O2R.html',
                                # 2015-03-10
                                'funds_snapshot_20150311_F0GBR04O2J.html',
                                # 2015-03-11
                                # 2015-03-12
                                'funds_snapshot_20150313_F0GBR04O2J.html',
                                # 2015-03-13, First J and then R due to query query order
                                'funds_snapshot_20150314_F0GBR04O2J.html',
                                'funds_snapshot_20150313_F0GBR04O2R.html',
                            ])
    # With some invalid responses
    yield _return_test_data([('F0GBR04O2J', ['invalid.html',  #
                                             'funds_snapshot_20150313_F0GBR04O2J.html',  # 2015-03-12
                                             'funds_snapshot_20150314_F0GBR04O2J.html',  # 2015-03-13
                                             ]),
                             ('F0GBR04O2R', ['funds_snapshot_20150314_F0GBR04O2R.html',  # 2015-03-13
                                             'funds_snapshot_20150310_F0GBR04O2R.html',  # 2015-03-09
                                             'funds_snapshot_20150313_F0GBR04O2R.html',  # 2015-03-13
                                             ]),
                             ],
                            [
                                # 2015-03-09
                                'funds_snapshot_20150310_F0GBR04O2R.html',
                                # 2015-03-10
                                # invalid'funds_snapshot_20150311_F0GBR04O2J.html',
                                # 2015-03-11
                                # 2015-03-12
                                'funds_snapshot_20150313_F0GBR04O2J.html',
                                # 2015-03-13, First J and then R due to query query order
                                'funds_snapshot_20150314_F0GBR04O2J.html',
                                'funds_snapshot_20150313_F0GBR04O2R.html',
                            ])
    # ETF
    yield _return_test_data([('0P0000M7ZP', ['etf_snapshot_20150312_0P0000M7ZP.html']),
                             ],
                            [
                                'etf_snapshot_20150312_0P0000M7ZP.html'
                            ])
    # Stock
    yield _return_test_data([('knebv', ['stock_20150320_knebv.html']),
                             ],
                            [
                                'stock_20150320_knebv.html'
                            ])
@parameterized(_yield_test_query_key_stats_parsing_funds_http_200_data())
def test_query_key_stats_parsing_parsing_errors_but_all_http_200(urls, response_filenames, expected_key_stats):
    """Canned HTTP-200 responses must parse into the expected key stats."""
    logger = logging.getLogger('test_query_key_stats_parsing_http_200')
    client = NonCachingAsyncRequestsClient('http://dummybaseurl.com', 4)
    with patch.object(data_access_api.grequests, 'map') as grequests_map, \
            patch.object(client, '_cdx_list'), \
            patch.object(data_access_api, 'prepare_replay_get'):
        # Load the canned HTML bodies shipped as package test data.
        responses = [pkg_resources.resource_stream(
            'financedatahoarder.services.tests', 'testdata/{}'.format(filename)).read() for filename in response_filenames]
        logger.debug(urls)
        num_requests = len(response_filenames)
        num_funds = len(urls)
        # Mock responses by the url
        grequest_map_return_values = [resp for resp in responses]
        dummy_responses = iter([[DummyResponse(ret, status_code=200)]
                                for ret in grequest_map_return_values])

        def _map_side_effect(reqs, *args, **kwargs):
            # Each grequests.map call is expected to carry a single request.
            assert len(reqs) == 1
            return next(dummy_responses)

        grequests_map.side_effect = _map_side_effect
        # num_requests should be evenly divisible by num_funds
        assert num_requests / float(num_funds) == int(num_requests / num_funds)
        num_times = int(num_requests / num_funds)
        actual = client.query_key_stats((date(2015, 1, 1), date(2015, 1, 1) + timedelta(days=num_times - 1)),
                                        urls)
        # Basic assertion that input test data is correct
        actual_args, actual_kwargs = grequests_map.call_args_list[0]
        eq_(actual_kwargs, {'size': 4})
        eq_(sum(len(map_args[0]) for map_args, _ in grequests_map.call_args_list),
            len(response_filenames))
        eq_(expected_key_stats, actual)
print actual | bsd-3-clause |
kiszk/spark | python/pyspark/sql/group.py | 24 | 12490 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
    """Expose a no-argument JVM GroupedData method as a Python API.

    The wrapped function's body is ignored; only its name (used to look up
    the JVM method) and docstring are preserved on the wrapper.
    """
    def _api(self):
        jdf = getattr(self._jgd, f.__name__)()
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api
def df_varargs_api(f):
    """Expose a varargs JVM GroupedData method as a Python API.

    Like dfapi, but forwards the positional column names as a JVM Seq.
    """
    def _api(self, *cols):
        jcols = _to_seq(self.sql_ctx._sc, cols)
        jdf = getattr(self._jgd, f.__name__)(jcols)
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api
class GroupedData(object):
    """
    A set of methods for aggregations on a :class:`DataFrame`,
    created by :func:`DataFrame.groupBy`.

    .. note:: Experimental

    .. versionadded:: 1.3
    """

    def __init__(self, jgd, df):
        # Py4J handle to the JVM-side RelationalGroupedDataset.
        self._jgd = jgd
        # Parent DataFrame the grouping was created from.
        self._df = df
        self.sql_ctx = df.sql_ctx

    @ignore_unicode_prefix
    @since(1.3)
    def agg(self, *exprs):
        """Compute aggregates and returns the result as a :class:`DataFrame`.

        The available aggregate functions can be:

        1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`

        2. group aggregate pandas UDFs, created with
           :func:`pyspark.sql.functions.pandas_udf`

           .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
               a full shuffle is required. Also, all the data of a group will be loaded into
               memory, so the user should be aware of the potential OOM risk if data is skewed
               and certain groups are too large to fit in memory.

           .. seealso:: :func:`pyspark.sql.functions.pandas_udf`

        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
        is the column to perform aggregation on, and the value is the aggregate function.

        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.

        .. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
            in a single call to this function.

        :param exprs: a dict mapping from column name (string) to aggregate functions (string),
            or a list of :class:`Column`.

        >>> gdf = df.groupBy(df.name)
        >>> sorted(gdf.agg({"*": "count"}).collect())
        [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]

        >>> from pyspark.sql import functions as F
        >>> sorted(gdf.agg(F.min(df.age)).collect())
        [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]

        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> @pandas_udf('int', PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
        ... def min_udf(v):
        ...     return v.min()
        >>> sorted(gdf.agg(min_udf(df.age)).collect())  # doctest: +SKIP
        [Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
        """
        assert exprs, "exprs should not be empty"
        if len(exprs) == 1 and isinstance(exprs[0], dict):
            # Dict form: {column: aggregate-function-name}.
            jdf = self._jgd.agg(exprs[0])
        else:
            # Columns
            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
            jdf = self._jgd.agg(exprs[0]._jc,
                                _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
        return DataFrame(jdf, self.sql_ctx)

    @dfapi
    @since(1.3)
    def count(self):
        """Counts the number of records for each group.

        >>> sorted(df.groupBy(df.age).count().collect())
        [Row(age=2, count=1), Row(age=5, count=1)]
        """

    @df_varargs_api
    @since(1.3)
    def mean(self, *cols):
        """Computes average values for each numeric columns for each group.

        :func:`mean` is an alias for :func:`avg`.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().mean('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().mean('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """

    @df_varargs_api
    @since(1.3)
    def avg(self, *cols):
        """Computes average values for each numeric columns for each group.

        :func:`mean` is an alias for :func:`avg`.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().avg('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().avg('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """

    @df_varargs_api
    @since(1.3)
    def max(self, *cols):
        """Computes the max value for each numeric columns for each group.

        >>> df.groupBy().max('age').collect()
        [Row(max(age)=5)]
        >>> df3.groupBy().max('age', 'height').collect()
        [Row(max(age)=5, max(height)=85)]
        """

    @df_varargs_api
    @since(1.3)
    def min(self, *cols):
        """Computes the min value for each numeric column for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().min('age').collect()
        [Row(min(age)=2)]
        >>> df3.groupBy().min('age', 'height').collect()
        [Row(min(age)=2, min(height)=80)]
        """

    @df_varargs_api
    @since(1.3)
    def sum(self, *cols):
        """Compute the sum for each numeric columns for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().sum('age').collect()
        [Row(sum(age)=7)]
        >>> df3.groupBy().sum('age', 'height').collect()
        [Row(sum(age)=7, sum(height)=165)]
        """

    @since(1.6)
    def pivot(self, pivot_col, values=None):
        """
        Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
        There are two versions of pivot function: one that requires the caller to specify the list
        of distinct values to pivot on, and one that does not. The latter is more concise but less
        efficient, because Spark needs to first compute the list of distinct values internally.

        :param pivot_col: Name of the column to pivot.
        :param values: List of values that will be translated to columns in the output DataFrame.

        # Compute the sum of earnings for each year by course with each course as a separate column

        >>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
        [Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]

        # Or without specifying column values (less efficient)

        >>> df4.groupBy("year").pivot("course").sum("earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        >>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        """
        if values is None:
            jgd = self._jgd.pivot(pivot_col)
        else:
            jgd = self._jgd.pivot(pivot_col, values)
        return GroupedData(jgd, self._df)

    @since(2.3)
    def apply(self, udf):
        """
        Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
        as a `DataFrame`.

        The user-defined function should take a `pandas.DataFrame` and return another
        `pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
        to the user-function and the returned `pandas.DataFrame` are combined as a
        :class:`DataFrame`.

        The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
        returnType of the pandas udf.

        .. note:: This function requires a full shuffle. all the data of a group will be loaded
            into memory, so the user should be aware of the potential OOM risk if data is skewed
            and certain groups are too large to fit in memory.

        .. note:: Experimental

        :param udf: a grouped map user-defined function returned by
            :func:`pyspark.sql.functions.pandas_udf`.

        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ...     ("id", "v"))
        >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)  # doctest: +SKIP
        ... def normalize(pdf):
        ...     v = pdf.v
        ...     return pdf.assign(v=(v - v.mean()) / v.std())
        >>> df.groupby("id").apply(normalize).show()  # doctest: +SKIP
        +---+-------------------+
        | id|                  v|
        +---+-------------------+
        |  1|-0.7071067811865475|
        |  1| 0.7071067811865475|
        |  2|-0.8320502943378437|
        |  2|-0.2773500981126146|
        |  2| 1.1094003924504583|
        +---+-------------------+

        .. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
        """
        # Columns are special because hasattr always return True
        if isinstance(udf, Column) or not hasattr(udf, 'func') \
                or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
            raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
                             "GROUPED_MAP.")
        df = self._df
        # Apply the UDF over all columns so the whole group is visible to it.
        udf_column = udf(*[df[col] for col in df.columns])
        jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
        return DataFrame(jdf, self.sql_ctx)
def _test():
    """Run this module's doctests against a local 4-core SparkSession."""
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.group tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    # Fixture DataFrames referenced by the doctests above.
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java", year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java", year=2013, earnings=30000)]).toDF()
    globs['df5'] = sc.parallelize([
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
        Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
        Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
        Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| apache-2.0 |
larrybradley/astropy | astropy/nddata/ccddata.py | 5 | 27852 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from .compat import NDDataArray
from .nduncertainty import (
StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']

# Uncertainty classes this module can serialize to / deserialize from FITS
# extensions, plus name<->class lookup tables used by ``CCDData.to_hdu`` and
# ``fits_ccddata_reader``.
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}

# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
    """Decorator factory which temporarily disables the need for a unit when
    creating a new CCDData instance. The final result must have a unit.

    Parameters
    ----------
    op : function
        The function to apply. Supported are:

        - ``np.add``
        - ``np.subtract``
        - ``np.multiply``
        - ``np.true_divide``

    Notes
    -----
    Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
    ``multiply`` because only these methods from NDArithmeticMixin are
    overwritten.
    """
    def decorator(func):
        def inner(self, operand, operand2=None, **kwargs):
            global _config_ccd_requires_unit
            _config_ccd_requires_unit = False
            try:
                result = self._prepare_then_do_arithmetic(op, operand,
                                                          operand2, **kwargs)
            finally:
                # Restore the flag even when the arithmetic raises; the
                # original code left the module-global requirement disabled
                # on failure, silently allowing later unitless CCDData.
                _config_ccd_requires_unit = True
            # Wrap it again as CCDData so it checks the final unit.
            return result.__class__(result)
        inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
        return sharedmethod(inner)
    return decorator
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
    """Return whether *unit* is the unit an uncertainty of class
    *uncertainty_type* must carry for data with *parent_unit*.

    Raises
    ------
    ValueError
        If ``uncertainty_type`` is not one of the known uncertainty classes.
    """
    # Each known uncertainty class implies a fixed relation between the
    # uncertainty unit and the parent data unit.
    expected_unit_for = {
        StdDevUncertainty: lambda parent: parent,
        VarianceUncertainty: lambda parent: parent ** 2,
        InverseVariance: lambda parent: 1 / (parent ** 2),
    }
    try:
        expected = expected_unit_for[uncertainty_type]
    except KeyError:
        raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
    return unit == expected(parent_unit)
class CCDData(NDDataArray):
    """A class describing basic CCD data.

    The CCDData class is based on the NDData object and includes a data array,
    uncertainty frame, mask frame, flag frame, meta data, units, and WCS
    information for a single CCD image.

    Parameters
    ----------
    data : `~astropy.nddata.CCDData`-like or array-like
        The actual data contained in this `~astropy.nddata.CCDData` object.
        Note that the data will always be saved by *reference*, so you should
        make a copy of the ``data`` before passing it in if that's the desired
        behavior.

    uncertainty : `~astropy.nddata.StdDevUncertainty`, \
            `~astropy.nddata.VarianceUncertainty`, \
            `~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
            None, optional
        Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
        Default is ``None``.

    mask : `numpy.ndarray` or None, optional
        Mask for the data, given as a boolean Numpy array with a shape
        matching that of the data. The values must be `False` where
        the data is *valid* and `True` when it is not (like Numpy
        masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will causes the mask from the masked array to be
        ignored.
        Default is ``None``.

    flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
            optional
        Flags giving information about each pixel. These can be specified
        either as a Numpy array of any type with a shape matching that of the
        data, or as a `~astropy.nddata.FlagCollection` instance which has a
        shape matching that of the data.
        Default is ``None``.

    wcs : `~astropy.wcs.WCS` or None, optional
        WCS-object containing the world coordinate system for the data.
        Default is ``None``.

    meta : dict-like object or None, optional
        Metadata for this object. "Metadata" here means all information that
        is included with this object but not part of any other attribute
        of this particular object, e.g. creation date, unique identifier,
        simulation parameters, exposure time, telescope name, etc.

    unit : `~astropy.units.Unit` or str, optional
        The units of the data.
        Default is ``None``.

        .. warning::

            If the unit is ``None`` or not otherwise specified it will raise a
            ``ValueError``

    Raises
    ------
    ValueError
        If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
        match shape) onto ``data``.

    Methods
    -------
    read(\\*args, \\**kwargs)
        ``Classmethod`` to create an CCDData instance based on a ``FITS`` file.
        This method uses :func:`fits_ccddata_reader` with the provided
        parameters.
    write(\\*args, \\**kwargs)
        Writes the contents of the CCDData instance into a new ``FITS`` file.
        This method uses :func:`fits_ccddata_writer` with the provided
        parameters.

    Attributes
    ----------
    known_invalid_fits_unit_strings
        A dictionary that maps commonly-used fits unit name strings that are
        technically invalid to the correct valid unit type (or unit string).
        This is primarily for variant names like "ELECTRONS/S" which are not
        formally valid, but are unambiguous and frequently enough encountered
        that it is convenient to map them to the correct unit.

    Notes
    -----
    `~astropy.nddata.CCDData` objects can be easily converted to a regular
    Numpy array using `numpy.asarray`.

    For example::

        >>> from astropy.nddata import CCDData
        >>> import numpy as np
        >>> x = CCDData([1,2,3], unit='adu')
        >>> np.asarray(x)
        array([1, 2, 3])

    This is useful, for example, when plotting a 2D image using
    matplotlib.

        >>> from astropy.nddata import CCDData
        >>> from matplotlib import pyplot as plt   # doctest: +SKIP
        >>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
        >>> plt.imshow(x)   # doctest: +SKIP
    """

    def __init__(self, *args, **kwd):
        # Accept ``header`` as a FITS-flavored alias for ``meta``; passing
        # both is ambiguous and rejected.
        if 'meta' not in kwd:
            kwd['meta'] = kwd.pop('header', None)
        if 'header' in kwd:
            raise ValueError("can't have both header and meta.")

        super().__init__(*args, **kwd)
        if self._wcs is not None:
            # Normalize any high-level WCS wrapper to the underlying
            # FITS-style `astropy.wcs.WCS`; only that type is supported here.
            llwcs = self._wcs.low_level_wcs
            if not isinstance(llwcs, WCS):
                raise TypeError("the wcs must be a WCS instance.")
            self._wcs = llwcs

        # Check if a unit is set. This can be temporarily disabled by the
        # _CCDDataUnit contextmanager.
        if _config_ccd_requires_unit and self.unit is None:
            raise ValueError("a unit for CCDData must be specified.")

    def _slice_wcs(self, item):
        """
        Override the WCS slicing behaviour so that the wcs attribute continues
        to be an `astropy.wcs.WCS`.
        """
        if self.wcs is None:
            return None

        try:
            return self.wcs[item]
        except Exception as err:
            self._handle_wcs_slicing_error(err, item)

    @property
    def data(self):
        # Raw data array; set by reference, no copy or validation.
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    @property
    def wcs(self):
        return self._wcs

    @wcs.setter
    def wcs(self, value):
        # Unlike __init__, the setter accepts only a plain WCS, not a
        # high-level wrapper.
        if not isinstance(value, WCS):
            raise TypeError("the wcs must be a WCS instance.")
        self._wcs = value

    @property
    def unit(self):
        return self._unit

    @unit.setter
    def unit(self, value):
        # Strings and unit-like objects are normalized through u.Unit.
        self._unit = u.Unit(value)

    @property
    def header(self):
        # ``header`` is an alias for ``meta`` (FITS terminology).
        return self._meta

    @header.setter
    def header(self, value):
        self.meta = value

    @property
    def uncertainty(self):
        return self._uncertainty

    @uncertainty.setter
    def uncertainty(self, value):
        if value is not None:
            if isinstance(value, NDUncertainty):
                # Re-wrap an uncertainty already attached to another NDData
                # so the parent link below does not steal it from its owner.
                if getattr(value, '_parent_nddata', None) is not None:
                    value = value.__class__(value, copy=False)
                self._uncertainty = value
            elif isinstance(value, np.ndarray):
                if value.shape != self.shape:
                    raise ValueError("uncertainty must have same shape as "
                                     "data.")
                self._uncertainty = StdDevUncertainty(value)
                log.info("array provided for uncertainty; assuming it is a "
                         "StdDevUncertainty.")
            else:
                raise TypeError("uncertainty must be an instance of a "
                                "NDUncertainty object or a numpy array.")
            self._uncertainty.parent_nddata = self
        else:
            self._uncertainty = value

    def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
               hdu_flags=None, wcs_relax=True, key_uncertainty_type='UTYPE'):
        """Creates an HDUList object from a CCDData object.

        Parameters
        ----------
        hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
            If it is a string append this attribute to the HDUList as
            `~astropy.io.fits.ImageHDU` with the string as extension name.
            Flags are not supported at this time. If ``None`` this attribute
            is not appended.
            Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
            ``None`` for flags.

        wcs_relax : bool
            Value of the ``relax`` parameter to use in converting the WCS to a
            FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
            ``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
            preserved.

        key_uncertainty_type : str, optional
            The header key name for the class name of the uncertainty (if any)
            that is used to store the uncertainty type in the uncertainty hdu.
            Default is ``UTYPE``.

            .. versionadded:: 3.1

        Raises
        ------
        ValueError
            - If ``self.mask`` is set but not a `numpy.ndarray`.
            - If ``self.uncertainty`` is set but not a astropy uncertainty type.
            - If ``self.uncertainty`` is set but has another unit then
              ``self.data``.

        NotImplementedError
            Saving flags is not supported.

        Returns
        -------
        hdulist : `~astropy.io.fits.HDUList`
        """
        if isinstance(self.header, fits.Header):
            # Copy here so that we can modify the HDU header by adding WCS
            # information without changing the header of the CCDData object.
            header = self.header.copy()
        else:
            # Because _insert_in_metadata_fits_safe is written as a method
            # we need to create a dummy CCDData instance to hold the FITS
            # header we are constructing. This probably indicates that
            # _insert_in_metadata_fits_safe should be rewritten in a more
            # sensible way...
            dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
            for k, v in self.header.items():
                dummy_ccd._insert_in_metadata_fits_safe(k, v)
            header = dummy_ccd.header
        if self.unit is not u.dimensionless_unscaled:
            header['bunit'] = self.unit.to_string()
        if self.wcs:
            # Simply extending the FITS header with the WCS can lead to
            # duplicates of the WCS keywords; iterating over the WCS
            # header should be safer.
            #
            # Turns out if I had read the io.fits.Header.extend docs more
            # carefully, I would have realized that the keywords exist to
            # avoid duplicates and preserve, as much as possible, the
            # structure of the commentary cards.
            #
            # Note that until astropy/astropy#3967 is closed, the extend
            # will fail if there are comment cards in the WCS header but
            # not header.
            wcs_header = self.wcs.to_header(relax=wcs_relax)
            header.extend(wcs_header, useblanks=False, update=True)
        hdus = [fits.PrimaryHDU(self.data, header)]

        if hdu_mask and self.mask is not None:
            # Always assuming that the mask is a np.ndarray (check that it has
            # a 'shape').
            if not hasattr(self.mask, 'shape'):
                raise ValueError('only a numpy.ndarray mask can be saved.')

            # Convert boolean mask to uint since io.fits cannot handle bool.
            hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
            hdus.append(hduMask)

        if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information which uncertainty was
            # used so that loading the HDUList can infer the uncertainty type.
            # No idea how this can be done so only allow StdDevUncertainty.
            uncertainty_cls = self.uncertainty.__class__
            if uncertainty_cls not in _known_uncertainties:
                raise ValueError('only uncertainties of type {} can be saved.'
                                 .format(_known_uncertainties))
            uncertainty_name = _unc_cls_to_name[uncertainty_cls]

            hdr_uncertainty = fits.Header()
            hdr_uncertainty[key_uncertainty_type] = uncertainty_name

            # Assuming uncertainty is an StdDevUncertainty save just the array
            # this might be problematic if the Uncertainty has a unit differing
            # from the data so abort for different units. This is important for
            # astropy > 1.2
            if (hasattr(self.uncertainty, 'unit') and
                    self.uncertainty.unit is not None):
                if not _uncertainty_unit_equivalent_to_parent(
                        uncertainty_cls, self.uncertainty.unit, self.unit):
                    raise ValueError(
                        'saving uncertainties with a unit that is not '
                        'equivalent to the unit from the data unit is not '
                        'supported.')

            hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty,
                                      name=hdu_uncertainty)
            hdus.append(hduUncert)

        if hdu_flags and self.flags:
            raise NotImplementedError('adding the flags to a HDU is not '
                                      'supported at this time.')

        hdulist = fits.HDUList(hdus)

        return hdulist

    def copy(self):
        """
        Return a copy of the CCDData object.
        """
        return self.__class__(self, copy=True)

    # Arithmetic methods from NDArithmeticMixin, wrapped so the unit
    # requirement is suspended during the operation (see ``_arithmetic``).
    add = _arithmetic(np.add)(NDDataArray.add)
    subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
    multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
    divide = _arithmetic(np.true_divide)(NDDataArray.divide)

    def _insert_in_metadata_fits_safe(self, key, value):
        """
        Insert key/value pair into metadata in a way that FITS can serialize.

        Parameters
        ----------
        key : str
            Key to be inserted in dictionary.

        value : str or None
            Value to be inserted.

        Notes
        -----
        This addresses a shortcoming of the FITS standard. There are length
        restrictions on both the ``key`` (8 characters) and ``value`` (72
        characters) in the FITS standard. There is a convention for handling
        long keywords and a convention for handling long values, but the
        two conventions cannot be used at the same time.

        This addresses that case by checking the length of the ``key`` and
        ``value`` and, if necessary, shortening the key.
        """
        if len(key) > 8 and len(value) > 72:
            short_name = key[:8]
            self.meta[f'HIERARCH {key.upper()}'] = (
                short_name, f"Shortened name for {key}")
            self.meta[short_name] = value
        else:
            self.meta[key] = value

    # A dictionary mapping "known" invalid fits unit
    known_invalid_fits_unit_strings = {'ELECTRONS/S': u.electron/u.s,
                                       'ELECTRONS': u.electron,
                                       'electrons': u.electron}
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
    'JD-OBS',
    'MJD-OBS',
    'DATE-OBS'
]
# WCS rotation/scale matrix keywords in the two alternative FITS
# representations; used below to detect and resolve a mixed PC/CD header.
_PCs = set(['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2'])
_CDs = set(['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'])
def _generate_wcs_and_update_header(hdr):
    """
    Generate a WCS object from a header and remove the WCS-specific
    keywords from the header.

    Parameters
    ----------
    hdr : astropy.io.fits.header or other dict-like

    Returns
    -------
    new_header, wcs
        The header with WCS keywords stripped (or the original header if no
        usable WCS was found) and the `~astropy.wcs.WCS` object (or ``None``).
    """
    # Try constructing a WCS object.
    try:
        wcs = WCS(hdr)
    except Exception as exc:
        # Normally WCS only raises Warnings and doesn't fail but in rare
        # cases (malformed header) it could fail...
        log.info('An exception happened while extracting WCS informations from '
                 'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
        return hdr, None
    # Test for success by checking to see if the wcs ctype has a non-empty
    # value, return None for wcs if ctype is empty.
    if not wcs.wcs.ctype[0]:
        return (hdr, None)

    new_hdr = hdr.copy()
    # If the keywords below are in the header they are also added to WCS.
    # It seems like they should *not* be removed from the header, though.
    wcs_header = wcs.to_header(relax=True)
    for k in wcs_header:
        if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
            new_hdr.remove(k, ignore_missing=True)

    # Check that this does not result in an inconsistent header WCS if the WCS
    # is converted back to a header.

    if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
        # The PCi_j representation is used by the astropy.wcs object,
        # so CDi_j keywords were not removed from new_hdr. Remove them now.
        for cd in _CDs:
            new_hdr.remove(cd, ignore_missing=True)

    # The other case -- CD in the header produced by astropy.wcs -- should
    # never happen based on [1], which computes the matrix in PC form.
    # [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
    #
    # The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
    # check for the possibility that both PC and CD are present in the result
    # so if the implementation of to_header changes in wcslib in the future
    # then the tests should catch it, and then this code will need to be
    # updated.

    # We need to check for any SIP coefficients that got left behind if the
    # header has SIP.
    if wcs.sip is not None:
        # SIP coefficient keywords look like e.g. ``A_1_2``; remove every
        # coefficient up to the stored polynomial order for each axis.
        keyword = '{}_{}_{}'
        polynomials = ['A', 'B', 'AP', 'BP']
        for poly in polynomials:
            order = wcs.sip.__getattribute__(f'{poly.lower()}_order')
            for i, j in itertools.product(range(order), repeat=2):
                new_hdr.remove(keyword.format(poly, i, j),
                               ignore_missing=True)

    return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
                        hdu_mask='MASK', hdu_flags=None,
                        key_uncertainty_type='UTYPE', **kwd):
    """
    Generate a CCDData object from a FITS file.

    Parameters
    ----------
    filename : str
        Name of fits file.

    hdu : int, optional
        FITS extension from which CCDData should be initialized. If zero and
        and no data in the primary extension, it will search for the first
        extension with data. The header will be added to the primary header.
        Default is ``0``.

    unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and there is a
        unit for the image in the FITS header (the keyword ``BUNIT`` is used
        as the unit, if present), this argument is used for the unit.
        Default is ``None``.

    hdu_uncertainty : str or None, optional
        FITS extension from which the uncertainty should be initialized. If the
        extension does not exist the uncertainty of the CCDData is ``None``.
        Default is ``'UNCERT'``.

    hdu_mask : str or None, optional
        FITS extension from which the mask should be initialized. If the
        extension does not exist the mask of the CCDData is ``None``.
        Default is ``'MASK'``.

    hdu_flags : str or None, optional
        Currently not implemented.
        Default is ``None``.

    key_uncertainty_type : str, optional
        The header key name where the class name of the uncertainty is stored
        in the hdu of the uncertainty (if any).
        Default is ``UTYPE``.

        .. versionadded:: 3.1

    kwd :
        Any additional keyword parameters are passed through to the FITS reader
        in :mod:`astropy.io.fits`; see Notes for additional discussion.

    Notes
    -----
    FITS files that contained scaled data (e.g. unsigned integer images) will
    be scaled and the keywords used to manage scaled data in
    :mod:`astropy.io.fits` are disabled.
    """
    # Reject fits.open options that would break the scaled-data handling
    # described in the Notes above.
    unsupport_open_keywords = {
        'do_not_scale_image_data': 'Image data must be scaled.',
        'scale_back': 'Scale information is not preserved.'
    }
    for key, msg in unsupport_open_keywords.items():
        if key in kwd:
            prefix = f'unsupported keyword: {key}.'
            raise TypeError(' '.join([prefix, msg]))
    with fits.open(filename, **kwd) as hdus:
        hdr = hdus[hdu].header

        if hdu_uncertainty is not None and hdu_uncertainty in hdus:
            unc_hdu = hdus[hdu_uncertainty]
            stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None')
            # For compatibility reasons the default is standard deviation
            # uncertainty because files could have been created before the
            # uncertainty type was stored in the header.
            unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
            uncertainty = unc_type(unc_hdu.data)
        else:
            uncertainty = None

        if hdu_mask is not None and hdu_mask in hdus:
            # Mask is saved as uint but we want it to be boolean.
            mask = hdus[hdu_mask].data.astype(np.bool_)
        else:
            mask = None

        if hdu_flags is not None and hdu_flags in hdus:
            raise NotImplementedError('loading flags is currently not '
                                      'supported.')

        # search for the first instance with data if
        # the primary header is empty.
        if hdu == 0 and hdus[hdu].data is None:
            for i in range(len(hdus)):
                # NOTE(review): ``hdus.info(hdu)`` relies on hdu == 0 being a
                # falsy ``output`` argument so that ``info`` returns the
                # summary list instead of printing it -- confirm against the
                # astropy.io.fits.HDUList.info API.
                if (hdus.info(hdu)[i][3] == 'ImageHDU' and
                        hdus.fileinfo(i)['datSpan'] > 0):
                    hdu = i
                    comb_hdr = hdus[hdu].header.copy()
                    # Add header values from the primary header that aren't
                    # present in the extension header.
                    comb_hdr.extend(hdr, unique=True)
                    hdr = comb_hdr
                    log.info(f"first HDU with data is extension {hdu}.")
                    break

        if 'bunit' in hdr:
            fits_unit_string = hdr['bunit']
            # patch to handle FITS files using ADU for the unit instead of the
            # standard version of 'adu'
            if fits_unit_string.strip().lower() == 'adu':
                fits_unit_string = fits_unit_string.lower()
        else:
            fits_unit_string = None

        if fits_unit_string:
            if unit is None:
                # Convert the BUNIT header keyword to a unit and if that's not
                # possible raise a meaningful error message.
                try:
                    kifus = CCDData.known_invalid_fits_unit_strings
                    if fits_unit_string in kifus:
                        fits_unit_string = kifus[fits_unit_string]
                    fits_unit_string = u.Unit(fits_unit_string)
                except ValueError:
                    raise ValueError(
                        'The Header value for the key BUNIT ({}) cannot be '
                        'interpreted as valid unit. To successfully read the '
                        'file as CCDData you can pass in a valid `unit` '
                        'argument explicitly or change the header of the FITS '
                        'file before reading it.'
                        .format(fits_unit_string))
            else:
                log.info("using the unit {} passed to the FITS reader instead "
                         "of the unit {} in the FITS file."
                         .format(unit, fits_unit_string))

        # An explicit ``unit`` argument always wins over BUNIT.
        use_unit = unit or fits_unit_string
        hdr, wcs = _generate_wcs_and_update_header(hdr)
        ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
                           mask=mask, uncertainty=uncertainty, wcs=wcs)

    return ccd_data
def fits_ccddata_writer(
        ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT',
        hdu_flags=None, key_uncertainty_type='UTYPE', **kwd):
    """
    Write CCDData object to FITS file.

    Parameters
    ----------
    filename : str
        Name of file.

    hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
        If it is a string append this attribute to the HDUList as
        `~astropy.io.fits.ImageHDU` with the string as extension name.
        Flags are not supported at this time. If ``None`` this attribute
        is not appended.
        Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
        ``None`` for flags.

    key_uncertainty_type : str, optional
        The header key name for the class name of the uncertainty (if any)
        that is used to store the uncertainty type in the uncertainty hdu.
        Default is ``UTYPE``.

        .. versionadded:: 3.1

    kwd :
        All additional keywords are passed to :py:mod:`astropy.io.fits`

    Raises
    ------
    ValueError
        - If ``self.mask`` is set but not a `numpy.ndarray`.
        - If ``self.uncertainty`` is set but not a
          `~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has another unit then
          ``self.data``.

    NotImplementedError
        Saving flags is not supported.
    """
    # Serialization is fully delegated: ``to_hdu`` assembles the HDUList
    # (and performs all validation), ``writeto`` handles the file I/O.
    hdu_list = ccd_data.to_hdu(
        hdu_mask=hdu_mask,
        hdu_uncertainty=hdu_uncertainty,
        key_uncertainty_type=key_uncertainty_type,
        hdu_flags=hdu_flags)
    hdu_list.writeto(filename, **kwd)
# Hook CCDData into astropy's unified I/O registry so that
# ``CCDData.read``/``CCDData.write`` dispatch to the functions above for
# the 'fits' format.
with registry.delay_doc_updates(CCDData):
    registry.register_reader('fits', CCDData, fits_ccddata_reader)
    registry.register_writer('fits', CCDData, fits_ccddata_writer)
    registry.register_identifier('fits', CCDData, fits.connect.is_fits)
| bsd-3-clause |
jforbess/pvlib-python | pvlib/test/test_spa.py | 5 | 14836 | import os
import datetime as dt
import logging
pvl_logger = logging.getLogger('pvlib')

# ``reload`` moved between modules across Python versions: builtin in Py2,
# ``imp.reload`` in early Py3, ``importlib.reload`` in Py3.4+.  Fall back
# through the options; if neither import succeeds the Py2 builtin is
# already in scope.
try:
    from importlib import reload
except ImportError:
    try:
        from imp import reload
    except ImportError:
        pass
import numpy as np
import numpy.testing as npt
import pandas as pd
import unittest
from nose.tools import raises, assert_almost_equals
from nose.plugins.skip import SkipTest
from pvlib.location import Location
# Collapse the numba major.minor version to a single int used by the
# skip decorator below (e.g. '0.17.1' -> 17); 0 means numba is absent.
try:
    from numba import __version__ as numba_version
    numba_version_int = int(numba_version.split('.')[0] +
                            numba_version.split('.')[1])
except ImportError:
    numba_version_int = 0
# Reference input and expected intermediate values for the solar position
# algorithm tests.  The date/location (2003-10-17 12:30:30 MST near Golden,
# CO) and the expected quantities presumably come from the NREL SPA
# report's worked example -- TODO confirm against the reference document.
times = pd.date_range('2003-10-17 12:30:30', periods=1, freq='D').tz_localize('MST')
unixtimes = times.tz_convert('UTC').astype(np.int64)*1.0/10**9

# Observer location and atmospheric conditions.
lat = 39.742476
lon = -105.1786
elev = 1830.14
pressure = 820
temp = 11
delta_t = 67.0
atmos_refract = 0.5667

# Expected intermediate quantities of the SPA calculation chain.
JD = 2452930.312847
JC = 0.0379277986858
JDE = 2452930.313623
JCE = 0.037927819916852
JME = 0.003792781991685
L = 24.0182616917
B = -0.0001011219
R = 0.9965422974
Theta = 204.0182616917
beta = 0.0001011219
X0 = 17185.861179
X1 = 1722.893218
X2 = 18234.075703
X3 = 18420.071012
X4 = 51.686951
dPsi = -0.00399840
dEpsilon = 0.00166657
epsilon0 = 84379.672625
epsilon = 23.440465
dTau = -0.005711
lamd = 204.0085519281
v0 = 318.515579
v = 318.511910
alpha = 202.227408
delta = -9.31434
H = 11.10590
xi = 0.002451
dAlpha = -0.000369
alpha_prime = 202.22704
delta_prime = -9.316179
H_prime = 11.10627
e0 = 39.872046
de = 0.016332
e = 39.888378
theta = 50.11162
theta0 = 90 - e0
Gamma = 14.340241
Phi = 194.340241
class SpaBase(object):
    """Test functions common to numpy and numba spa.

    Concrete subclasses set ``self.spa`` to the pvlib.spa module loaded
    either as plain numpy code or compiled with numba; every test below
    checks one step of the SPA calculation chain against the expected
    module-level reference values.
    """

    def test_julian_day_dt(self):
        # Decompose the reference timestamp into calendar components;
        # the local name shadows the ``datetime`` module alias ``dt``.
        dt = times.tz_convert('UTC')[0]
        year = dt.year
        month = dt.month
        day = dt.day
        hour = dt.hour
        minute = dt.minute
        second = dt.second
        microsecond = dt.microsecond
        assert_almost_equals(JD,
                             self.spa.julian_day_dt(
                                 year, month, day, hour,
                                 minute, second, microsecond), 6)

    def test_julian_ephemeris_day(self):
        assert_almost_equals(JDE, self.spa.julian_ephemeris_day(JD, delta_t), 5)

    def test_julian_century(self):
        assert_almost_equals(JC, self.spa.julian_century(JD), 6)

    def test_julian_ephemeris_century(self):
        assert_almost_equals(JCE, self.spa.julian_ephemeris_century(JDE), 10)

    def test_julian_ephemeris_millenium(self):
        assert_almost_equals(JME, self.spa.julian_ephemeris_millennium(JCE), 10)

    def test_heliocentric_longitude(self):
        assert_almost_equals(L, self.spa.heliocentric_longitude(JME), 6)

    def test_heliocentric_latitude(self):
        assert_almost_equals(B, self.spa.heliocentric_latitude(JME), 6)

    def test_heliocentric_radius_vector(self):
        assert_almost_equals(R, self.spa.heliocentric_radius_vector(JME), 6)

    def test_geocentric_longitude(self):
        assert_almost_equals(Theta, self.spa.geocentric_longitude(L), 6)

    def test_geocentric_latitude(self):
        assert_almost_equals(beta, self.spa.geocentric_latitude(B), 6)

    def test_mean_elongation(self):
        assert_almost_equals(X0, self.spa.mean_elongation(JCE), 5)

    def test_mean_anomaly_sun(self):
        assert_almost_equals(X1, self.spa.mean_anomaly_sun(JCE), 5)

    def test_mean_anomaly_moon(self):
        assert_almost_equals(X2, self.spa.mean_anomaly_moon(JCE), 5)

    def test_moon_argument_latitude(self):
        assert_almost_equals(X3, self.spa.moon_argument_latitude(JCE), 5)

    def test_moon_ascending_longitude(self):
        assert_almost_equals(X4, self.spa.moon_ascending_longitude(JCE), 6)

    def test_longitude_nutation(self):
        assert_almost_equals(dPsi, self.spa.longitude_nutation(JCE, X0, X1, X2,
                                                               X3, X4), 6)

    def test_obliquity_nutation(self):
        assert_almost_equals(dEpsilon, self.spa.obliquity_nutation(JCE, X0, X1,
                                                                   X2, X3, X4),
                             6)

    def test_mean_ecliptic_obliquity(self):
        assert_almost_equals(epsilon0, self.spa.mean_ecliptic_obliquity(JME), 6)

    def test_true_ecliptic_obliquity(self):
        assert_almost_equals(epsilon, self.spa.true_ecliptic_obliquity(
            epsilon0, dEpsilon), 6)

    def test_aberration_correction(self):
        assert_almost_equals(dTau, self.spa.aberration_correction(R), 6)

    def test_apparent_sun_longitude(self):
        assert_almost_equals(lamd, self.spa.apparent_sun_longitude(
            Theta, dPsi, dTau), 6)

    def test_mean_sidereal_time(self):
        assert_almost_equals(v0, self.spa.mean_sidereal_time(JD, JC), 3)

    def test_apparent_sidereal_time(self):
        assert_almost_equals(v, self.spa.apparent_sidereal_time(
            v0, dPsi, epsilon), 5)

    def test_geocentric_sun_right_ascension(self):
        assert_almost_equals(alpha, self.spa.geocentric_sun_right_ascension(
            lamd, epsilon, beta), 6)

    def test_geocentric_sun_declination(self):
        assert_almost_equals(delta, self.spa.geocentric_sun_declination(
            lamd, epsilon, beta), 6)

    def test_local_hour_angle(self):
        assert_almost_equals(H, self.spa.local_hour_angle(v, lon, alpha), 4)

    def test_equatorial_horizontal_parallax(self):
        assert_almost_equals(xi, self.spa.equatorial_horizontal_parallax(R), 6)

    def test_parallax_sun_right_ascension(self):
        # x/y are observer-position terms derived from latitude & elevation.
        u = self.spa.uterm(lat)
        x = self.spa.xterm(u, lat, elev)
        y = self.spa.yterm(u, lat, elev)
        assert_almost_equals(dAlpha, self.spa.parallax_sun_right_ascension(
            x, xi, H, delta), 4)

    def test_topocentric_sun_right_ascension(self):
        assert_almost_equals(alpha_prime,
                             self.spa.topocentric_sun_right_ascension(
                                 alpha, dAlpha), 5)

    def test_topocentric_sun_declination(self):
        u = self.spa.uterm(lat)
        x = self.spa.xterm(u, lat, elev)
        y = self.spa.yterm(u, lat, elev)
        assert_almost_equals(delta_prime, self.spa.topocentric_sun_declination(
            delta, x, y, xi, dAlpha, H), 5)

    def test_topocentric_local_hour_angle(self):
        assert_almost_equals(H_prime, self.spa.topocentric_local_hour_angle(
            H, dAlpha), 5)

    def test_topocentric_elevation_angle_without_atmosphere(self):
        assert_almost_equals(
            e0, self.spa.topocentric_elevation_angle_without_atmosphere(
                lat, delta_prime, H_prime), 6)

    def test_atmospheric_refraction_correction(self):
        assert_almost_equals(de, self.spa.atmospheric_refraction_correction(
            pressure, temp, e0, atmos_refract), 6)

    def test_topocentric_elevation_angle(self):
        assert_almost_equals(e, self.spa.topocentric_elevation_angle(e0, de), 6)

    def test_topocentric_zenith_angle(self):
        assert_almost_equals(theta, self.spa.topocentric_zenith_angle(e), 5)

    def test_topocentric_astronomers_azimuth(self):
        assert_almost_equals(Gamma, self.spa.topocentric_astronomers_azimuth(
            H_prime, delta_prime, lat), 5)

    def test_topocentric_azimuth_angle(self):
        assert_almost_equals(Phi, self.spa.topocentric_azimuth_angle(Gamma), 5)

    def test_solar_position(self):
        # End-to-end check: full pipeline with and without the
        # sunrise/sunset/transit ("sst") short-circuit.
        npt.assert_almost_equal(
            np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract)[:-1], 5)
        npt.assert_almost_equal(
            np.array([[v, alpha, delta]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, sst=True)[:3], 5)

    def test_equation_of_time(self):
        eot = 14.64
        M = self.spa.sun_mean_longitude(JME)
        assert_almost_equals(eot, self.spa.equation_of_time(
            M, alpha, dPsi, epsilon), 2)

    def test_transit_sunrise_sunset(self):
        # tests at greenwich
        # NOTE: the local ``times`` below intentionally shadows the
        # module-level reference timestamp.
        times = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 0),
                                  dt.datetime(2004, 12, 4, 0)]
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 7, 8, 15),
                                    dt.datetime(2004, 12, 4, 4, 38, 57)]
                                   ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 17, 1, 4),
                                   dt.datetime(2004, 12, 4, 19, 2, 2)]
                                  ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        result = self.spa.transit_sunrise_sunset(times, -35.0, 0.0, 64.0, 1)
        npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 3)
        npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 3)

        times = pd.DatetimeIndex([dt.datetime(1994, 1, 2), ]
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(1994, 1, 2, 16, 59, 55), ]
                                  ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(1994, 1, 2, 7, 8, 12), ]
                                   ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        result = self.spa.transit_sunrise_sunset(times, 35.0, 0.0, 64.0, 1)
        npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 3)
        npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 3)

        # tests from USNO
        # Golden
        times = pd.DatetimeIndex([dt.datetime(2015, 1, 2),
                                  dt.datetime(2015, 4, 2),
                                  dt.datetime(2015, 8, 2),
                                  dt.datetime(2015, 12, 2), ],
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 7, 19),
                                    dt.datetime(2015, 4, 2, 5, 43),
                                    dt.datetime(2015, 8, 2, 5, 1),
                                    dt.datetime(2015, 12, 2, 7, 1), ],
                                   ).tz_localize('MST').astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 16, 49),
                                   dt.datetime(2015, 4, 2, 18, 24),
                                   dt.datetime(2015, 8, 2, 19, 10),
                                   dt.datetime(2015, 12, 2, 16, 38), ],
                                  ).tz_localize('MST').astype(np.int64)*1.0/10**9
        result = self.spa.transit_sunrise_sunset(times, 39.0, -105.0, 64.0, 1)
        npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 1)
        npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 1)

        # Beijing
        times = pd.DatetimeIndex([dt.datetime(2015, 1, 2),
                                  dt.datetime(2015, 4, 2),
                                  dt.datetime(2015, 8, 2),
                                  dt.datetime(2015, 12, 2), ],
                                 ).tz_localize('UTC').astype(np.int64)*1.0/10**9
        sunrise = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 7, 36),
                                    dt.datetime(2015, 4, 2, 5, 58),
                                    dt.datetime(2015, 8, 2, 5, 13),
                                    dt.datetime(2015, 12, 2, 7, 17), ],
                                   ).tz_localize('Asia/Shanghai'
                                                 ).astype(np.int64)*1.0/10**9
        sunset = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 17, 0),
                                   dt.datetime(2015, 4, 2, 18, 39),
                                   dt.datetime(2015, 8, 2, 19, 28),
                                   dt.datetime(2015, 12, 2, 16, 50), ],
                                  ).tz_localize('Asia/Shanghai'
                                                ).astype(np.int64)*1.0/10**9
        result = self.spa.transit_sunrise_sunset(times, 39.917, 116.383, 64.0, 1)
        npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 1)
        npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 1)
class NumpySpaTest(unittest.TestCase, SpaBase):
    """Run the shared SpaBase tests against the pure-NumPy spa implementation.

    The PVLIB_USE_NUMBA environment variable is forced to '0' before the
    module is re-imported, so no numba compilation takes place.
    """

    @classmethod
    def setUpClass(cls):
        # Disable numba, then force a re-import so the flag takes effect.
        os.environ['PVLIB_USE_NUMBA'] = '0'
        import pvlib.spa as spa
        cls.spa = reload(spa)

    @classmethod
    def tearDownClass(cls):
        # Remove the flag so subsequent test classes start from a clean slate.
        del os.environ['PVLIB_USE_NUMBA']

    def test_julian_day(self):
        # The NumPy code path accepts the full unixtimes array directly.
        assert_almost_equals(JD, self.spa.julian_day(unixtimes)[0], 6)
@unittest.skipIf(numba_version_int < 17,
                 'Numba not installed or version not >= 0.17.0')
class NumbaSpaTest(unittest.TestCase, SpaBase):
    """Import spa, compiling to numba, and run tests"""
    @classmethod
    def setUpClass(self):
        # Enable the numba-compiled code path before re-importing the module.
        os.environ['PVLIB_USE_NUMBA'] = '1'
        if numba_version_int >= 17:
            import pvlib.spa as spa
            spa = reload(spa)
            self.spa = spa
    @classmethod
    def tearDownClass(self):
        # Clean up the environment flag for later test classes.
        del os.environ['PVLIB_USE_NUMBA']
    def test_julian_day(self):
        # NOTE(review): unlike the NumPy variant, the numba path is fed a
        # scalar (unixtimes[0]) here — presumably the compiled kernel's
        # scalar entry point; confirm against pvlib.spa.
        assert_almost_equals(JD, self.spa.julian_day(unixtimes[0]), 6)
    def test_solar_position_singlethreaded(self):
        # Compare full solar-position output (all but the last row) against
        # reference values, then the sun-transit subset (first 3 rows).
        npt.assert_almost_equal(
            np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=1)[:-1], 5)
        npt.assert_almost_equal(
            np.array([[v, alpha, delta]]).T, self.spa.solar_position(
                unixtimes, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=1, sst=True)[:3], 5)
    def test_solar_position_multithreaded(self):
        # Triplicate the single timestamp so the work can be split across
        # threads; the result must match the single-threaded reference.
        result = np.array([theta, theta0, e, e0, Phi])
        nresult = np.array([result, result, result]).T
        times = np.array([unixtimes[0], unixtimes[0], unixtimes[0]])
        npt.assert_almost_equal(
            nresult
            , self.spa.solar_position(
                times, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=8)[:-1], 5)
        result = np.array([v, alpha, delta])
        nresult = np.array([result, result, result]).T
        npt.assert_almost_equal(
            nresult
            , self.spa.solar_position(
                times, lat, lon, elev, pressure, temp, delta_t,
                atmos_refract, numthreads=8, sst=True)[:3], 5)
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# Non-centered, sparse cluster centers used to build the shared test dataset.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
# Shared fixture: 1000 samples drawn around the 3 centers above (5 features).
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
    # Test spectral embedding with two components
    random_state = np.random.RandomState(seed)
    n_sample = 100
    affinity = np.zeros(shape=[n_sample * 2,
                               n_sample * 2])
    # first component: dense positive block in the upper-left quadrant
    affinity[0:n_sample,
             0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # second component: dense positive block in the lower-right quadrant
    affinity[n_sample::,
             n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # connection: a single weak edge linking the two components
    affinity[0, n_sample + 1] = 1
    affinity[n_sample + 1, 0] = 1
    # zero the diagonal, then symmetrise the affinity matrix
    affinity.flat[::2 * n_sample + 1] = 0
    affinity = 0.5 * (affinity + affinity.T)
    true_label = np.zeros(shape=2 * n_sample)
    true_label[0:n_sample] = 1
    se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    embedded_coordinate = se_precomp.fit_transform(affinity)
    # Some numpy versions are touchy with types
    embedded_coordinate = \
        se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first components using 0.
    label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
    # a perfect split yields NMI == 1.0
    assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
    # Embedding a precomputed RBF kernel must agree (up to per-column sign)
    # with letting SpectralEmbedding build the same kernel internally.
    gamma = 1.0
    kernel_matrix = rbf_kernel(S, gamma=gamma)
    est_precomputed = SpectralEmbedding(n_components=2,
                                        affinity="precomputed",
                                        random_state=np.random.RandomState(seed))
    est_rbf = SpectralEmbedding(n_components=2, affinity="rbf", gamma=gamma,
                                random_state=np.random.RandomState(seed))
    coords_precomputed = est_precomputed.fit_transform(kernel_matrix)
    coords_rbf = est_rbf.fit_transform(S)
    # Both estimators must have seen the same affinity matrix ...
    assert_array_almost_equal(
        est_precomputed.affinity_matrix_, est_rbf.affinity_matrix_)
    # ... and produce the same embedding up to column sign flips.
    assert_true(_check_with_col_sign_flipping(coords_precomputed,
                                              coords_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
    # Test spectral embedding with callable affinity
    gamma = 0.9
    kern = rbf_kernel(S, gamma=gamma)
    # Estimator whose affinity is a user-supplied callable ...
    se_callable = SpectralEmbedding(n_components=2,
                                    affinity=(
                                        lambda x: rbf_kernel(x, gamma=gamma)),
                                    gamma=gamma,
                                    random_state=np.random.RandomState(seed))
    # ... versus the equivalent built-in "rbf" affinity.
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_rbf = se_rbf.fit_transform(S)
    embed_callable = se_callable.fit_transform(S)
    # Affinity matrices must match each other and the hand-computed kernel.
    assert_array_almost_equal(
        se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
    # Embeddings agree up to per-column sign flips.
    assert_true(
        _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
    # Test spectral embedding with amg solver
    # pyamg is an optional dependency; skip gracefully when missing.
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        raise SkipTest("pyamg not available.")
    se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                               eigen_solver="amg", n_neighbors=5,
                               random_state=np.random.RandomState(seed))
    se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                                  eigen_solver="arpack", n_neighbors=5,
                                  random_state=np.random.RandomState(seed))
    embed_amg = se_amg.fit_transform(S)
    embed_arpack = se_arpack.fit_transform(S)
    # Both eigensolvers must agree up to per-column sign flips.
    assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
    # Test using pipeline to do spectral clustering
    random_state = np.random.RandomState(seed)
    se_rbf = SpectralEmbedding(n_components=n_clusters,
                               affinity="rbf",
                               random_state=random_state)
    se_knn = SpectralEmbedding(n_components=n_clusters,
                               affinity="nearest_neighbors",
                               n_neighbors=5,
                               random_state=random_state)
    # KMeans on the embedded coordinates should recover the true blob
    # labels almost perfectly (NMI ~ 1.0, checked to 2 decimals).
    for se in [se_rbf, se_knn]:
        km = KMeans(n_clusters=n_clusters, random_state=random_state)
        km.fit(se.fit_transform(S))
        assert_array_almost_equal(
            normalized_mutual_info_score(
                km.labels_,
                true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # An unrecognised eigen_solver value must make fit() raise ValueError.
    estimator = SpectralEmbedding(n_components=1, affinity="precomputed",
                                  random_state=np.random.RandomState(seed),
                                  eigen_solver="<unknown>")
    assert_raises(ValueError, estimator.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # An unrecognised affinity value must make fit() raise ValueError.
    estimator = SpectralEmbedding(n_components=1, affinity="<unknown>",
                                  random_state=np.random.RandomState(seed))
    assert_raises(ValueError, estimator.fit, S)
def test_connectivity(seed=36):
    # _graph_is_connected must give the same verdict for dense arrays and
    # both sparse representations of the same adjacency matrix.
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    for convert in ((lambda g: g), csr_matrix, csc_matrix):
        # node 0 is isolated in the first graph -> not connected
        assert_equal(_graph_is_connected(convert(disconnected)), False)
        # adding the 0-1 edge makes the graph a single component
        assert_equal(_graph_is_connected(convert(connected)), True)
def test_spectral_embedding_deterministic():
    # Two runs on the same affinity matrix must yield identical embeddings.
    rng = np.random.RandomState(36)
    sims = rbf_kernel(rng.randn(10, 30))
    first_run = spectral_embedding(sims)
    second_run = spectral_embedding(sims)
    assert_array_almost_equal(first_run, second_run)
| bsd-3-clause |
Silmathoron/nest-simulator | pynest/nest/tests/test_get_set.py | 5 | 21303 | # -*- coding: utf-8 -*-
#
# test_get_set.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NodeCollection get/set tests
"""
import unittest
import nest
import json
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
try:
import pandas
import pandas.util.testing as pt
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@nest.ll_api.check_stack
class TestNodeCollectionGetSet(unittest.TestCase):
    """NodeCollection get/set tests"""
    def setUp(self):
        # Start every test from a pristine kernel so node IDs are predictable.
        nest.ResetKernel()
    def test_get(self):
        """
        Test that get function works as expected.
        """
        nodes = nest.Create('iaf_psc_alpha', 10)
        C_m = nodes.get('C_m')
        node_ids = nodes.get('global_id')
        E_L = nodes.get('E_L')
        V_m = nodes.get('V_m')
        t_ref = nodes.get('t_ref')
        g = nodes.get(['local', 'thread', 'vp'])
        local = g['local']
        thread = g['thread']
        vp = g['vp']
        # Expected values are the iaf_psc_alpha model defaults.
        self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
                               250.0, 250.0, 250.0, 250.0, 250.0))
        self.assertEqual(node_ids, tuple(range(1, 11)))
        self.assertEqual(E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
                               -70.0, -70.0, -70.0, -70.0, -70.0))
        self.assertEqual(V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
                               -70.0, -70.0, -70.0, -70.0, -70.0))
        self.assertEqual(t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
                                 2.0, 2.0, 2.0, 2.0, 2.0))
        self.assertTrue(local)
        self.assertEqual(thread, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
        self.assertEqual(vp, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
        g_reference = {'local': (True, True, True, True, True,
                                 True, True, True, True, True),
                       'thread': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                       'vp': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)}
        self.assertEqual(g, g_reference)
    def test_get_sliced(self):
        """
        Test that get works on sliced NodeCollections
        """
        nodes = nest.Create('iaf_psc_alpha', 10)
        V_m = nodes[2:5].get('V_m')
        g = nodes[5:7].get(['t_ref', 'tau_m'])
        C_m = nodes[2:9:2].get('C_m')
        self.assertEqual(V_m, (-70.0, -70.0, -70.0))
        self.assertEqual(g['t_ref'], (2.0, 2.0))
        self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0))
    def test_get_composite(self):
        """
        Test that get function works on composite NodeCollections
        """
        n1 = nest.Create('iaf_psc_alpha', 2)
        n2 = nest.Create('iaf_psc_delta', 2)
        n3 = nest.Create('iaf_psc_exp')
        n4 = nest.Create('iaf_psc_alpha', 3)
        n1.set(V_m=[-77., -88.])
        n3.set({'V_m': -55.})
        n1.set(C_m=[251., 252.])
        n2.set(C_m=[253., 254.])
        n3.set({'C_m': 255.})
        n4.set(C_m=[256., 257., 258.])
        n5 = n1 + n2 + n3 + n4
        status_dict = n5.get()
        # Check that we get values in correct order
        vm_ref = (-77., -88., -70., -70., -55, -70., -70., -70.)
        self.assertEqual(status_dict['V_m'], vm_ref)
        # Check that we get None where not applicable
        # tau_syn_ex is part of iaf_psc_alpha
        tau_ref = (2., 2., None, None, 2., 2., 2., 2.)
        self.assertEqual(status_dict['tau_syn_ex'], tau_ref)
        # refractory_input is part of iaf_psc_delta
        refrac_ref = (None, None,
                      False, False,
                      None, None,
                      None, None)
        self.assertEqual(status_dict['refractory_input'], refrac_ref)
        # Check that calling get with string works on composite NCs, both on
        # parameters all the models have, and on individual parameters.
        Cm_ref = [x * 1. for x in range(251, 259)]
        Cm = n5.get('C_m')
        self.assertEqual(list(Cm), Cm_ref)
        refrac = n5.get('refractory_input')
        self.assertEqual(refrac, refrac_ref)
    @unittest.skipIf(not HAVE_NUMPY, 'NumPy package is not available')
    def test_get_different_size(self):
        """
        Test get with different input for different sizes of NodeCollections
        """
        single_sd = nest.Create('spike_detector', 1)
        multi_sd = nest.Create('spike_detector', 10)
        empty_array_float = np.array([], dtype=np.float64)
        empty_array_int = np.array([], dtype=np.int64)
        # Single node, literal parameter
        self.assertEqual(single_sd.get('start'), 0.0)
        # Single node, array parameter
        self.assertEqual(single_sd.get(['start', 'time_in_steps']),
                         {'start': 0.0, 'time_in_steps': False})
        # Single node, hierarchical with literal parameter
        np.testing.assert_array_equal(single_sd.get('events', 'times'),
                                      empty_array_float)
        # Multiple nodes, hierarchical with literal parameter
        values = multi_sd.get('events', 'times')
        for v in values:
            np.testing.assert_array_equal(v, empty_array_float)
        # Single node, hierarchical with array parameter
        values = single_sd.get('events', ['senders', 'times'])
        self.assertEqual(len(values), 2)
        self.assertTrue('senders' in values)
        self.assertTrue('times' in values)
        np.testing.assert_array_equal(values['senders'], empty_array_int)
        np.testing.assert_array_equal(values['times'], empty_array_float)
        # Multiple nodes, hierarchical with array parameter
        values = multi_sd.get('events', ['senders', 'times'])
        self.assertEqual(len(values), 2)
        self.assertTrue('senders' in values)
        self.assertTrue('times' in values)
        self.assertEqual(len(values['senders']), len(multi_sd))
        for v in values['senders']:
            np.testing.assert_array_equal(v, empty_array_int)
        for v in values['times']:
            np.testing.assert_array_equal(v, empty_array_float)
        # Single node, no parameter (gets all values)
        values = single_sd.get()
        num_values_single_sd = len(values.keys())
        self.assertEqual(values['start'], 0.0)
        # Multiple nodes, no parameter (gets all values)
        values = multi_sd.get()
        self.assertEqual(len(values.keys()), num_values_single_sd)
        self.assertEqual(values['start'],
                         tuple(0.0 for i in range(len(multi_sd))))
    @unittest.skipIf(not HAVE_PANDAS, 'Pandas package is not available')
    def test_get_pandas(self):
        """
        Test that get function with Pandas output works as expected.
        """
        single_sd = nest.Create('spike_detector', 1)
        multi_sd = nest.Create('spike_detector', 10)
        empty_array_float = np.array([], dtype=np.float64)
        # Single node, literal parameter
        pt.assert_frame_equal(single_sd.get('start', output='pandas'),
                              pandas.DataFrame({'start': [0.0]},
                                               index=tuple(single_sd.tolist())))
        # Multiple nodes, literal parameter
        pt.assert_frame_equal(multi_sd.get('start', output='pandas'),
                              pandas.DataFrame(
                                  {'start': [0.0 for i in range(
                                      len(multi_sd))]},
                                  index=tuple(multi_sd.tolist())))
        # Single node, array parameter
        pt.assert_frame_equal(single_sd.get(['start', 'n_events'],
                                            output='pandas'),
                              pandas.DataFrame({'start': [0.0],
                                                'n_events': [0]},
                                               index=tuple(single_sd.tolist())))
        # Multiple nodes, array parameter
        # NOTE(review): 'n_events' has length 1 while 'start' has
        # len(multi_sd) entries; verify the pandas version in use accepts
        # this mixed-length dict for DataFrame construction.
        ref_dict = {'start': [0.0 for i in range(len(multi_sd))],
                    'n_events': [0]}
        pt.assert_frame_equal(multi_sd.get(['start', 'n_events'],
                                           output='pandas'),
                              pandas.DataFrame(ref_dict,
                                               index=tuple(multi_sd.tolist())))
        # Single node, hierarchical with literal parameter
        pt.assert_frame_equal(single_sd.get('events', 'times',
                                            output='pandas'),
                              pandas.DataFrame({'times': [[]]},
                                               index=tuple(single_sd.tolist())))
        # Multiple nodes, hierarchical with literal parameter
        ref_dict = {'times': [empty_array_float
                              for i in range(len(multi_sd))]}
        pt.assert_frame_equal(multi_sd.get('events', 'times',
                                           output='pandas'),
                              pandas.DataFrame(ref_dict,
                                               index=tuple(multi_sd.tolist())))
        # Single node, hierarchical with array parameter
        ref_df = pandas.DataFrame(
            {'times': [[]], 'senders': [[]]}, index=tuple(single_sd.tolist()))
        ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
        pt.assert_frame_equal(single_sd.get(
            'events', ['senders', 'times'], output='pandas'),
            ref_df)
        # Multiple nodes, hierarchical with array parameter
        ref_dict = {'times': [[] for i in range(len(multi_sd))],
                    'senders': [[] for i in range(len(multi_sd))]}
        ref_df = pandas.DataFrame(
            ref_dict,
            index=tuple(multi_sd.tolist()))
        # Column order is sorted on both sides before comparing.
        ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
        sd_df = multi_sd.get('events', ['senders', 'times'], output='pandas')
        sd_df = sd_df.reindex(sorted(sd_df.columns), axis=1)
        pt.assert_frame_equal(sd_df,
                              ref_df)
        # Single node, no parameter (gets all values)
        values = single_sd.get(output='pandas')
        num_values_single_sd = values.shape[1]
        self.assertEqual(values['start'][tuple(single_sd.tolist())[0]], 0.0)
        # Multiple nodes, no parameter (gets all values)
        values = multi_sd.get(output='pandas')
        self.assertEqual(values.shape, (len(multi_sd), num_values_single_sd))
        pt.assert_series_equal(values['start'],
                               pandas.Series({key: 0.0
                                              for key in tuple(multi_sd.tolist())},
                                             dtype=np.float64,
                                             name='start'))
        # With data in events
        nodes = nest.Create('iaf_psc_alpha', 10)
        pg = nest.Create('poisson_generator', {'rate': 70000.0})
        nest.Connect(pg, nodes)
        nest.Connect(nodes, single_sd)
        nest.Connect(nodes, multi_sd, 'one_to_one')
        nest.Simulate(39)
        # Expected spikes depend on the seeded kernel state after ResetKernel.
        ref_dict = {'times': [[31.8, 36.1, 38.5]],
                    'senders': [[17, 12, 20]]}
        ref_df = pandas.DataFrame(ref_dict, index=tuple(single_sd.tolist()))
        ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
        pt.assert_frame_equal(single_sd.get('events', ['senders', 'times'],
                                            output='pandas'),
                              ref_df)
        ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
                              []],
                    'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
        ref_df = pandas.DataFrame(ref_dict, index=tuple(multi_sd.tolist()))
        ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
        pt.assert_frame_equal(multi_sd.get('events', ['senders', 'times'],
                                           output='pandas'),
                              ref_df)
    def test_get_JSON(self):
        """
        Test that get function with json output works as expected.
        """
        single_sd = nest.Create('spike_detector', 1)
        multi_sd = nest.Create('spike_detector', 10)
        # Single node, literal parameter
        self.assertEqual(json.loads(
            single_sd.get('start', output='json')), 0.0)
        # Multiple nodes, literal parameter
        self.assertEqual(
            json.loads(multi_sd.get('start', output='json')),
            len(multi_sd) * [0.0])
        # Single node, array parameter
        ref_dict = {'start': 0.0, 'n_events': 0}
        self.assertEqual(
            json.loads(single_sd.get(['start', 'n_events'], output='json')),
            ref_dict)
        # Multiple nodes, array parameter
        ref_dict = {'start': len(multi_sd) * [0.0],
                    'n_events': len(multi_sd) * [0]}
        self.assertEqual(
            json.loads(multi_sd.get(['start', 'n_events'], output='json')),
            ref_dict)
        # Single node, hierarchical with literal parameter
        self.assertEqual(json.loads(single_sd.get(
            'events', 'times', output='json')), [])
        # Multiple nodes, hierarchical with literal parameter
        ref_list = len(multi_sd) * [[]]
        self.assertEqual(
            json.loads(multi_sd.get('events', 'times', output='json')),
            ref_list)
        # Single node, hierarchical with array parameter
        ref_dict = {'senders': [], 'times': []}
        self.assertEqual(
            json.loads(single_sd.get(
                'events', ['senders', 'times'], output='json')),
            ref_dict)
        # Multiple nodes, hierarchical with array parameter
        ref_dict = {'times': len(multi_sd) * [[]],
                    'senders': len(multi_sd) * [[]]}
        self.assertEqual(
            json.loads(multi_sd.get(
                'events', ['senders', 'times'], output='json')),
            ref_dict)
        # Single node, no parameter (gets all values)
        values = json.loads(single_sd.get(output='json'))
        num_values_single_sd = len(values)
        self.assertEqual(values['start'], 0.0)
        # Multiple nodes, no parameter (gets all values)
        values = json.loads(multi_sd.get(output='json'))
        self.assertEqual(len(values), num_values_single_sd)
        self.assertEqual(values['start'], len(multi_sd) * [0.0])
        # With data in events
        nodes = nest.Create('iaf_psc_alpha', 10)
        pg = nest.Create('poisson_generator', {'rate': 70000.0})
        nest.Connect(pg, nodes)
        nest.Connect(nodes, single_sd)
        nest.Connect(nodes, multi_sd, 'one_to_one')
        nest.Simulate(39)
        # Same reference spikes as in the pandas test above.
        ref_dict = {'times': [31.8, 36.1, 38.5],
                    'senders': [17, 12, 20]}
        self.assertEqual(
            json.loads(single_sd.get(
                'events', ['senders', 'times'], output='json')),
            ref_dict)
        ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
                              []],
                    'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
        self.assertEqual(
            json.loads(multi_sd.get(
                'events', ['senders', 'times'], output='json')),
            ref_dict)
    def test_set(self):
        """
        Test that set function works as expected.
        """
        nodes = nest.Create('iaf_psc_alpha', 10)
        # Dict to set same value for all nodes.
        nodes.set({'C_m': 100.0})
        C_m = nodes.get('C_m')
        self.assertEqual(C_m, (100.0, 100.0, 100.0, 100.0, 100.0,
                               100.0, 100.0, 100.0, 100.0, 100.0))
        # Set same value for all nodes.
        nodes.set(tau_Ca=500.0)
        tau_Ca = nodes.get('tau_Ca')
        self.assertEqual(tau_Ca, (500.0, 500.0, 500.0, 500.0, 500.0,
                                  500.0, 500.0, 500.0, 500.0, 500.0))
        # List of dicts, where each dict corresponds to a single node.
        nodes.set(({'V_m': 10.0}, {'V_m': 20.0}, {'V_m': 30.0}, {'V_m': 40.0},
                   {'V_m': 50.0}, {'V_m': 60.0}, {'V_m': 70.0}, {'V_m': 80.0},
                   {'V_m': 90.0}, {'V_m': -100.0}))
        V_m = nodes.get('V_m')
        self.assertEqual(V_m, (10.0, 20.0, 30.0, 40.0, 50.0,
                               60.0, 70.0, 80.0, 90.0, -100.0))
        # Set value of a parameter based on list. List must be length of nodes.
        nodes.set(V_reset=[-85., -82., -80., -77., -75.,
                           -72., -70., -67., -65., -62.])
        V_reset = nodes.get('V_reset')
        self.assertEqual(V_reset, (-85., -82., -80., -77., -75.,
                                   -72., -70., -67., -65., -62.))
        # A list of the wrong length must raise.
        with self.assertRaises(IndexError):
            nodes.set(V_reset=[-85., -82., -80., -77., -75.])
        # Set different parameters with a dictionary.
        nodes.set({'t_ref': 44.0, 'tau_m': 2.0, 'tau_minus': 42.0})
        g = nodes.get(['t_ref', 'tau_m', 'tau_minus'])
        self.assertEqual(g['t_ref'], (44.0, 44.0, 44.0, 44.0, 44.0,
                                      44.0, 44.0, 44.0, 44.0, 44.0))
        self.assertEqual(g['tau_m'], (2.0, 2.0, 2.0, 2.0, 2.0,
                                      2.0, 2.0, 2.0, 2.0, 2.0))
        self.assertEqual(g['tau_minus'], (42.0, 42.0, 42.0, 42.0, 42.0,
                                          42.0, 42.0, 42.0, 42.0, 42.0))
        # 'vp' is read-only; writing it must raise a NESTError.
        with self.assertRaises(nest.kernel.NESTError):
            nodes.set({'vp': 2})
    def test_set_composite(self):
        """
        Test that set works on composite NodeCollections
        """
        nodes = nest.Create('iaf_psc_alpha', 10)
        nodes[2:5].set(({'V_m': -50.0}, {'V_m': -40.0}, {'V_m': -30.0}))
        nodes[5:7].set({'t_ref': 4.4, 'tau_m': 3.0})
        nodes[2:9:2].set(C_m=111.0)
        V_m = nodes.get('V_m')
        g = nodes.get(['t_ref', 'tau_m'])
        C_m = nodes.get('C_m')
        self.assertEqual(V_m, (-70.0, -70.0, -50.0, -40.0, -30.0,
                               -70.0, -70.0, -70.0, -70.0, -70.0,))
        self.assertEqual(g, {'t_ref': (2.0, 2.0, 2.0, 2.0, 2.0,
                                       4.4, 4.4, 2.0, 2.0, 2.0),
                             'tau_m': (10.0, 10.0, 10.0, 10.0, 10.0,
                                       3.00, 3.00, 10.0, 10.0, 10.0)})
        self.assertEqual(C_m, (250.0, 250.0, 111.0, 250.0, 111.0,
                               250.0, 111.0, 250.0, 111.0, 250.0))
    def test_get_attribute(self):
        """Test get using getattr"""
        nodes = nest.Create('iaf_psc_alpha', 10)
        self.assertEqual(nodes.C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
                                     250.0, 250.0, 250.0, 250.0, 250.0))
        self.assertEqual(nodes.global_id, tuple(range(1, 11)))
        self.assertEqual(nodes.E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
                                     -70.0, -70.0, -70.0, -70.0, -70.0))
        self.assertEqual(nodes.V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
                                     -70.0, -70.0, -70.0, -70.0, -70.0))
        self.assertEqual(nodes.t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
                                       2.0, 2.0, 2.0, 2.0, 2.0))
        # Unknown attribute access raises KeyError (not AttributeError).
        with self.assertRaises(KeyError):
            print(nodes.nonexistent_attribute)
        # 'spatial' is None for non-spatial collections ...
        self.assertIsNone(nodes.spatial)
        # ... and populated for collections created on a spatial grid.
        spatial_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([2, 2]))
        self.assertIsNotNone(spatial_nodes.spatial)
        spatial_reference = {'network_size': 4,
                             'center': (0.0, 0.0),
                             'edge_wrap': False,
                             'extent': (1.0, 1.0),
                             'shape': (2, 2)}
        self.assertEqual(spatial_nodes.spatial, spatial_reference)
    def test_set_attribute(self):
        """Test set using setattr"""
        nodes = nest.Create('iaf_psc_alpha', 10)
        nodes.C_m = 100.0
        self.assertEqual(nodes.get('C_m'), (100.0, 100.0, 100.0, 100.0, 100.0,
                                            100.0, 100.0, 100.0, 100.0, 100.0))
        v_reset_reference = (-85., -82., -80., -77., -75., -72., -70., -67., -65., -62.)
        nodes.V_reset = v_reset_reference
        self.assertEqual(nodes.get('V_reset'), v_reset_reference)
        # Wrong-length assignment raises IndexError.
        with self.assertRaises(IndexError):
            nodes.V_reset = [-85., -82., -80., -77., -75.]
        # Assigning an unknown attribute raises a NESTError.
        with self.assertRaises(nest.kernel.NESTError):
            nodes.nonexistent_attribute = 1.
def suite():
    """Build the test suite for this module.

    ``unittest.makeSuite`` is deprecated (and removed in Python 3.13); the
    ``TestLoader`` API collects the same ``test*``-prefixed methods.

    :return: unittest.TestSuite containing all TestNodeCollectionGetSet tests
    """
    return unittest.TestLoader().loadTestsFromTestCase(TestNodeCollectionGetSet)
def run():
    """Run the module's test suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
# Allow direct execution as a script.
if __name__ == "__main__":
    run()
| gpl-2.0 |
modsim/molyso | molyso/mm/highlevel_interactive_advanced_ground_truth.py | 1 | 28374 | # -*- coding: utf-8 -*-
"""
documentation
"""
from __future__ import division, unicode_literals, print_function
import numpy as np
import time
from .tracking_output import s_to_h
from ..generic.etc import QuickTableDumper
from .fluorescence import FluorescentChannel
import json
import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy
jsonpickle_numpy.register_handlers()
class PolyLinesManager:
    """Manage a set of interactively editable polylines on a matplotlib figure.

    The manager hooks the figure's pick and button-release events. A single
    click on a polyline vertex starts dragging it; a second click within
    ``double_click_timeout`` seconds inserts a new vertex between the two
    vertices closest to the mouse.
    """

    # Maximum delay (seconds) between two clicks to count as a double click.
    double_click_timeout = 0.250

    def __getstate__(self):
        # The matplotlib figure and the callback are not serializable;
        # drop them so the manager can be pickled/jsonpickled.
        dict_copy = self.__dict__.copy()
        dict_copy['figure'] = None
        dict_copy['update_callback'] = None
        return dict_copy

    def __setstate__(self, state):
        self.__dict__ = state

    def __init__(self, figure, line_segments=()):
        """
        :param figure: matplotlib figure whose canvas events are hooked
        :param line_segments: optional iterable of polylines to manage
            (an immutable default replaces the former mutable ``[]``,
            avoiding the shared-mutable-default pitfall)
        """
        self.figure = figure
        self.figure.canvas.mpl_connect('pick_event', self.on_pick)
        self.figure.canvas.mpl_connect('button_release_event', self.on_button_release)
        self.mouse_move_handler = None
        self.artist_mapping = {}
        self.line_segments = []
        self.selected_artist = None
        self.selected_point = None
        self.last_click = 0.0
        self.update_callback = None
        for ls in line_segments:
            self.add(ls)

    def add(self, *lss):
        """Register polylines; their plot handle is reset so draw() recreates it."""
        for ls in lss:
            ls.plot = None
            self.line_segments.append(ls)

    def delete(self, *lss):
        """Unregister polylines and remove their artists from the canvas."""
        for ls in lss:
            if ls in self.line_segments:
                self.line_segments.remove(ls)
            if ls.plot:
                ls.plot.remove()
            if ls.plot in self.artist_mapping:
                del self.artist_mapping[ls.plot]

    def draw(self, ax):
        """Draw all managed polylines onto *ax* and rebuild the artist lookup."""
        self.artist_mapping.clear()
        for ls in self.line_segments:
            ls.draw(ax)
            self.artist_mapping[ls.plot] = ls

    def on_pick(self, event):
        """Handle a pick event: select the nearest vertex, or insert one on double click."""
        if event.artist in self.artist_mapping:
            mouse_coords = np.array([event.mouseevent.xdata, event.mouseevent.ydata])
            ls = self.artist_mapping[event.artist]
            # Euclidean distance from the mouse to each vertex; the closest
            # two vertices bound the insertion segment on double click.
            distances = np.sqrt(((ls.points - mouse_coords)**2).sum(axis=1))
            n_th_point = np.argmin(distances)
            distances[n_th_point] = np.inf
            n2_th_point = np.argmin(distances)
            now = time.time()
            if (now - self.last_click) > self.double_click_timeout:
                # Single click: start dragging the closest vertex.
                self.last_click = now
                self.selected_artist = ls
                self.selected_point = n_th_point
                self.track_mouse()
            else:
                # Double click: insert a new vertex between the two closest.
                self.last_click = now
                ls.insert(min(n_th_point, n2_th_point), max(n_th_point, n2_th_point), mouse_coords, figure=event.canvas.figure)

    def track_mouse(self):
        """Start following mouse-motion events (drag mode)."""
        self.untrack_mouse()
        self.mouse_move_handler = self.figure.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)

    def untrack_mouse(self):
        """Stop following mouse-motion events, if currently tracking."""
        if self.mouse_move_handler:
            self.figure.canvas.mpl_disconnect(self.mouse_move_handler)
            self.mouse_move_handler = None

    def on_mouse_move(self, event):
        # Only react while the cursor is inside an axes.
        if event.inaxes:
            self.selected_artist.update(n=self.selected_point, xy=(event.xdata, event.ydata), figure=self.figure)

    def on_button_release(self, event):
        """Finish a drag; always notify the registered callback, if any."""
        if self.selected_artist:
            self.selected_artist = None
            self.untrack_mouse()
        if self.update_callback:
            self.update_callback()
class PolyLine:
    """An editable polyline backed by a (n, 2) array of vertex coordinates."""
    def __getstate__(self):
        # The matplotlib artist is not serializable; drop it on pickling.
        dict_copy = self.__dict__.copy()
        dict_copy['plot'] = None
        return dict_copy
    def __setstate__(self, state):
        self.__dict__ = state
    def __init__(self, points, closed=False):
        self.points = np.array(points)
        closed = False  # closed polylines are not working yet; force-open
        if closed:
            self.points = np.r_[self.points, [self.points[0]]]
        self.closed = closed
        self.plot = None
        # Kwargs passed to ax.plot(); picker=5 sets the pick radius in points.
        self.plot_kwargs = dict(marker='o', picker=5)
    def draw(self, ax):
        # Create the artist lazily; redraw() only mutates existing data.
        if self.plot is None:
            self.plot, = ax.plot(self.points[:, 0], self.points[:, 1], **self.plot_kwargs)
    def redraw(self, figure=None):
        # NOTE(review): assumes draw() was called first (self.plot is set).
        self.plot.set_xdata(self.points[:, 0])
        self.plot.set_ydata(self.points[:, 1])
        if figure:
            figure.canvas.draw_idle()
    def update(self, n=0, xy=(0.0, 0.0), figure=None):
        """Move vertex *n* to *xy* and refresh the artist."""
        self.points[n] = xy
        if self.closed:
            # Keep the duplicated end vertex in sync with the start vertex.
            if n == 0:
                self.points[-1] = self.points[0]
            elif n == len(self.points) - 1:
                self.points[0] = self.points[-1]
        self.redraw(figure)
    def insert_relative(self, lo=0, hi=1, relative=0.5, figure=None):
        """Insert a vertex between indices lo and hi at fraction *relative*."""
        lo_point, hi_point = self.points[lo], self.points[hi]
        xy = ((hi_point - lo_point) * relative + lo_point)
        self.points = np.r_[
            self.points[:lo+1],
            [xy],
            self.points[hi:]
        ]
        self.redraw(figure)
    def insert(self, lo=0, hi=1, xy=None, figure=None):
        """Insert vertex *xy* between indices lo and hi."""
        self.points = np.r_[
            self.points[:lo+1],
            [xy],
            self.points[hi:]
        ]
        self.redraw(figure)
class PairedPolyLine(PolyLine):
    """A polyline linked to a partner; edits propagate to the partner.

    When a ``pin`` axis (0 for x, 1 for y) is set, the partner's vertex is
    clamped to this polyline's coordinate along that axis.
    """
    def __init__(self, points, closed=False):
        super().__init__(points, closed=closed)
        self.other = None
        self.pin = None
        # Random shared color so both lines of a pair look alike.
        self.plot_kwargs['c'] = np.random.rand(3)
    def connect(self, other, pin=None):
        """Link this polyline with *other* symmetrically, sharing plot kwargs."""
        self.other = other
        self.pin = pin
        other.other = self
        other.pin = pin
        other.plot_kwargs = self.plot_kwargs
    def insert(self, lo=0, hi=1, xy=None, figure=None):
        # Insert here, mirror a midpoint insertion on the partner,
        # then enforce the pinned coordinate on the new vertex.
        super().insert(lo, hi, xy=xy, figure=figure)
        self.other.insert_relative(lo, hi, relative=0.5, figure=figure)
        self.handle_pin(lo+1, figure=figure)
    def update(self, n=0, xy=(0.0, 0.0), figure=None):
        super().update(n=n, xy=xy, figure=figure)
        self.handle_pin(n, figure=figure)
    def handle_pin(self, n, figure=None):
        """Clamp the partner's vertex *n* along the pinned axis and redraw both."""
        if self.pin is not None:
            self.other.points[n, self.pin] = self.points[n, self.pin]
        self.redraw(figure)
        self.other.redraw(figure)
def interactive_advanced_ground_truth_main(args, tracked_results):
    """
    Ground truth mode entry function.

    Opens an interactive matplotlib viewer over the tracked channel images and
    lets the user annotate cell growth with paired polylines; results are
    (de)serialized with jsonpickle to ``args.advanced_ground_truth``.

    :param args: parsed command line options; only ``advanced_ground_truth`` is read here
    :param tracked_results: ordered mapping of position -> tracking result object
    :return: :raise SystemExit:
    """
    calibration_px_to_mu = next(iter(tracked_results.values())).first.image.calibration_px_to_mu

    # position index -> list of channel indices that actually contain data
    acceptable_pos_chans = \
        {p: list(range(len(tracked_results[list(tracked_results.keys())[p]].channel_accumulator.keys())))
         for p
         in range(len(tracked_results.keys()))
         if len(tracked_results[list(tracked_results.keys())[p]].channel_accumulator.keys()) > 0}

    def plots_info():
        """
        Outputs some information about the data set.
        """
        print("Positions " + str(list(tracked_results.keys())))
        print("Acceptable channels per position " + repr(acceptable_pos_chans))

    plots_info()

    ground_truth_data = args.advanced_ground_truth

    # noinspection PyUnresolvedReferences
    try:
        with open(ground_truth_data, 'r') as fp:
            all_envs = jsonpickle.loads(fp.read())
    except FileNotFoundError:
        print("File did not exist, starting anew")
        all_envs = {}
    except json.decoder.JSONDecodeError:
        print("Corrupted (empty?) file, starting anew")
        all_envs = {}

    def save_data():
        """
        Saves the ground truth data to the file specified. (pickled data)
        """
        with open(ground_truth_data, 'w+') as inner_fp:
            inner_fp.write(jsonpickle.dumps(all_envs))
        print("Saved data to %s" % (ground_truth_data,))

    lowest_position = min(acceptable_pos_chans.keys())
    highest_position = max(acceptable_pos_chans.keys())

    # mutable [position_index, channel_index] cursor, advanced by key handlers
    next_dataset = [lowest_position, next(iter(acceptable_pos_chans[lowest_position]))]

    def perform_it():
        """
        Runs the ground truth mode.

        :return: :raise SystemExit:
        """
        next_pos, next_chan = next_dataset

        def empty_env():
            """
            Generates an empty environment.

            :return: fresh per-(position, channel) annotation state
            """
            return {
                'last_point_x': None,
                'last_point_y': None,
                'paired_polylines': [],
                'polyline_results': {}
            }

        # annotation state is keyed by the repr of the (position, channel) tuple
        key = repr((next_pos, next_chan))
        if key not in all_envs:
            all_envs[key] = empty_env()
        env = all_envs[key]

        pos = list(tracked_results.keys())[next_pos]
        tracking = tracked_results[pos]
        chan_num = list(tracking.channel_accumulator.keys())[next_chan]
        channels = tracking.channel_accumulator[chan_num]

        print("Opening position %d, channel %d" % (pos, chan_num,))

        # per-timepoint bookkeeping table; columns named just below
        data = np.zeros((len(channels), 6))
        n_timepoint, n_width, n_height, n_top, n_bottom, n_width_cumsum = 0, 1, 2, 3, 4, 5

        some_fluorescence_channel_image = some_channel_image = None
        fluorescence_count = 0

        for n, cc in enumerate(channels):
            data[n, n_timepoint] = cc.image.timepoint
            data[n, n_width] = cc.channel_image.shape[1]
            data[n, n_height] = cc.channel_image.shape[0]
            data[n, n_top] = cc.top
            data[n, n_bottom] = cc.bottom
            some_channel_image = cc.channel_image
            if isinstance(cc, FluorescentChannel):
                fluorescence_count = len(cc.fluorescences_channel_image)
                some_fluorescence_channel_image = cc.fluorescences_channel_image[0]

        if fluorescence_count > 0 and some_fluorescence_channel_image is None:
            print("File generated from fluorescence data, but no fluorescence channel information in cache.")
            print("Rerun analysis with -cfi/--channel-fluorescence-images option")

        data[:, n_width_cumsum] = np.cumsum(data[:, n_width])

        max_top, min_top = data[:, n_top].max(), data[:, n_top].min()
        max_bottom, min_bottom = data[:, n_bottom].max(), data[:, n_bottom].min()

        low, high = int(np.floor(min_top)), int(np.ceil(max_bottom))

        # montage of all timepoints laid out side by side (x = cumulative width)
        large_image = np.zeros((high - low, int(data[-1, n_width_cumsum])), dtype=some_channel_image.dtype)

        large_fluorescences_image = None
        if fluorescence_count and some_fluorescence_channel_image is not None:
            large_fluorescences_image = np.zeros(
                (fluorescence_count, high - low, int(data[-1, n_width_cumsum])),
                dtype=some_fluorescence_channel_image.dtype)

        large_image_min_max = [float('+Inf'), float('-Inf')]
        # NOTE(review): list-multiplication aliases the inner lists, but each
        # element is rebound wholesale below, so the aliasing is harmless here.
        large_fluorescence_image_min_max = [[float('+Inf'), float('-Inf')]] * fluorescence_count
        fluorescence_backgrounds = [dict() for _ in range(fluorescence_count)]

        for n, cc in enumerate(channels):
            lower_border = int(np.floor(data[n, n_top] - low))
            large_image[
                lower_border:int(lower_border + data[n, n_height]),
                int(data[n, n_width_cumsum] - data[n, n_width]):int(data[n, n_width_cumsum])
            ] = cc.channel_image
            large_image_min_max = [min(cc.channel_image.min(), large_image_min_max[0]),
                                   max(cc.channel_image.max(), large_image_min_max[1])]

            if isinstance(cc, FluorescentChannel):
                for fluorescence_c in range(fluorescence_count):
                    if cc.fluorescences_channel_image[fluorescence_c] is not None:
                        large_fluorescences_image[
                            fluorescence_c,
                            lower_border:int(lower_border + data[n, n_height]),
                            int(data[n, n_width_cumsum] - data[n, n_width]):int(data[n, n_width_cumsum])
                        ] = cc.fluorescences_channel_image[fluorescence_c]
                        large_fluorescence_image_min_max[fluorescence_c] = [
                            min(cc.fluorescences_channel_image[fluorescence_c].min(), large_fluorescence_image_min_max[fluorescence_c][0]),
                            max(cc.fluorescences_channel_image[fluorescence_c].max(), large_fluorescence_image_min_max[fluorescence_c][1])
                        ]
                        fluorescence_backgrounds[fluorescence_c][n] = cc.image.background_fluorescences[fluorescence_c]

        import matplotlib.pyplot as plt

        fig, ax = plt.subplots()
        plt.subplots_adjust(left=0.25, bottom=0.25)

        fig.canvas.set_window_title("Image Viewer")

        channels_per_inch = 5.0
        plt.rcParams['figure.figsize'] = (len(channels) / channels_per_inch, 4.0)
        plt.rcParams['figure.dpi'] = 150
        plt.rcParams['figure.subplot.top'] = 0.8
        plt.rcParams['figure.subplot.bottom'] = 0.2
        plt.rcParams['figure.subplot.left'] = 0.2
        plt.rcParams['figure.subplot.right'] = 0.8
        plt.rcParams['image.cmap'] = 'gray'

        axes_image = plt.imshow(large_image)
        axes_image.set_clim(vmin=large_image_min_max[0], vmax=large_image_min_max[1])
        # -1 = brightfield shown, 0..n-1 = fluorescence channel shown
        axes_image._molyso_image_shown = -1  # yes, that's bad

        plt.title("Ground Truth — Position %d, channel %d" % (pos, chan_num,))
        plt.xlabel("x [Pixel]")
        plt.ylabel("y [Pixel]")

        fig.tight_layout()

        lm = PolyLinesManager(plt.gcf())
        for p1, p2 in env['paired_polylines']:
            lm.add(p1, p2)

        def update_callback():
            # Recomputes growth statistics from the annotated polyline pairs.
            def pixels_to_timepoints(pixels):
                # map montage x pixels to timepoint indices via cumulative widths
                return np.array([
                    np.searchsorted(data[:, n_width_cumsum], pixel, side='right')
                    for pixel in pixels
                ])

            def timepoints_to_time(timepoints):
                return np.array([
                    data[timepoint, n_timepoint]
                    for timepoint in timepoints
                ])

            env['polyline_results'] = {}
            env['polyline_results_timestepwise'] = {}
            p_results = env['polyline_results']
            p_results_timestepwise = env['polyline_results_timestepwise']

            for polyline_num, (upper, lower) in enumerate(env['paired_polylines']):
                assert len(lower.points) == len(upper.points)
                x = lower.points[:, 0]
                u_y = upper.points[:, 1]
                l_y = lower.points[:, 1]

                if x[0] > x[-1]:  # the line is reversed!
                    x, u_y, l_y = x[::-1], u_y[::-1], l_y[::-1]

                timepoints = pixels_to_timepoints(x)

                # drop points that map to the same timepoint as their predecessor
                t_deltas = timepoints[1:] - timepoints[:-1]
                indices_to_keep = np.r_[[True], t_deltas != 0]
                x, u_y, l_y = x[indices_to_keep], u_y[indices_to_keep], l_y[indices_to_keep]

                timepoints = pixels_to_timepoints(x)
                times = timepoints_to_time(timepoints)

                height_deltas = u_y - l_y
                height_deltas *= calibration_px_to_mu  # important

                height_development = np.c_[s_to_h(times), height_deltas]
                height_development = height_development[1:, :] - height_development[:-1, :]

                changes = height_development.copy()
                changes = changes[:, 1] / changes[:, 0]

                try:
                    average_elongation = np.average(changes, weights=height_development[:, 0])
                except ZeroDivisionError:
                    average_elongation = float('NaN')

                p_results[polyline_num] = {
                    'growth_start': s_to_h(times[0]),
                    'growth_end': s_to_h(times[-1]),
                    'division_age': s_to_h(times[-1] - times[0]),
                    'growth_rate': np.log(2) / s_to_h(times[-1] - times[0]),
                    'average_elongation': average_elongation
                }

                # tricky: for every timepoint in between

                def interp(x1, x2, y1, y2, new_x):
                    # simple linear interpolation between (x1, y1) and (x2, y2)
                    return (((y2 - y1) / (x2 - x1)) * (new_x - x1)) + y1

                p_results_timestepwise[polyline_num] = []

                for n_t, t in enumerate(range(timepoints[0], timepoints[-1]+1)):
                    left_t = np.searchsorted(timepoints, t, side='left')
                    if left_t == 0:
                        left_t, right_t = left_t, left_t + 1
                    else:
                        left_t, right_t = left_t - 1, left_t

                    # interpolate the cell borders at the column center of timepoint t
                    x_centered = data[t, n_width_cumsum] - data[t, n_width] / 2.0
                    new_upper = interp(x[left_t], x[right_t], u_y[left_t], u_y[right_t], x_centered)
                    new_lower = interp(x[left_t], x[right_t], l_y[left_t], l_y[right_t], x_centered)

                    # plt.plot([x_centered, x_centered], [new_upper, new_lower])

                    inner_results = {
                        'timepoint_num': t,
                        'timepoint': s_to_h(timepoints_to_time([t])[0]),
                        'length': (new_upper - new_lower) * calibration_px_to_mu
                    }

                    for fluorescence_c in range(fluorescence_count):
                        fimg = large_fluorescences_image[
                            fluorescence_c,
                            int(new_lower):int(new_upper),
                            int(data[t, n_width_cumsum] - data[t, n_width]):int(data[t, n_width_cumsum])
                        ]
                        jobs = dict(min=lambda a: a.min(),
                                    max=lambda a: a.max(),
                                    mean=lambda a: a.mean(),
                                    std=lambda a: a.std(),
                                    median=lambda a: np.median(a))

                        for fun_name, fun_lambda in jobs.items():
                            try:
                                value = fun_lambda(fimg)
                            except ValueError:
                                # empty crop (degenerate borders) yields NaN
                                value = float('NaN')
                            inner_results['fluorescence_%s_raw_%d' % (fun_name, fluorescence_c)] = value

                        inner_results['fluorescence_background_%d' % (fluorescence_c)] = fluorescence_backgrounds[
                            fluorescence_c][t]

                    p_results_timestepwise[polyline_num].append(inner_results)

        lm.update_callback = update_callback
        lm.update_callback()
        lm.draw(ax)

        def refresh():
            """
            Refreshes the overlay.
            """
            fig.canvas.draw()

        def show_help():
            """
            Shows a help text for the ground truth mode.
            """
            print("""
Ground Truth Mode:
= Mouse =====================================
Mark division events by right click:
First a division, then a child's division.
= Keys ======================================
h show this help
p print growth rates
(last is based on mean division time)
d delete last division event
n/N next/previous multipoint
m/M next/previous channel
F show/cycle fluorescence/brightfield
o/O output tabular data to console/file
u/U output tabular single cell data to console/file
w write data
(to previously specified filename)
i start interactive python console
q quit ground truth mode
""")

        refresh()
        show_help()

        def click(e):
            """
            Mouse handler: a pair of right clicks defines a new polyline pair.

            :param e: matplotlib mouse event
            :return:
            """
            x, y = e.xdata, e.ydata
            if x is None or y is None:
                return
            if e.button == 3:
                last_point_x, last_point_y = env['last_point_x'], env['last_point_y']
                if last_point_x is not None:
                    lower = np.array([
                        [last_point_x, last_point_y],
                        [x, y]
                    ])
                    # the paired line starts 30 px above the drawn one
                    upper = lower.copy() - [0.0, 30.0]
                    p1, p2 = PairedPolyLine(lower), PairedPolyLine(upper)
                    p1.connect(p2, pin=0)
                    env['paired_polylines'].append((p1, p2))
                    lm.add(p1, p2)
                    lm.draw(ax)
                    refresh()
                    # print(n, data[n, n_timepoint])
                    env['last_point_x'], env['last_point_y'] = None, None
                else:
                    env['last_point_x'], env['last_point_y'] = x, y

        def key_press(event):
            """
            Keyboard handler implementing the commands listed by show_help().

            :param event: matplotlib key event
            :return: :raise SystemExit:
            """
            def show_stats():
                """
                Shows statistics.
                """
                print()
                p_results = [ab[1] for ab in sorted(env['polyline_results'].items(), key=lambda ab: ab[0])]
                inner_mu = [res['growth_rate'] for res in p_results]
                print("µ = ", inner_mu, np.mean(inner_mu))
                inner_elo = [res['average_elongation'] for res in p_results]
                print("elongation rate = ", inner_elo, np.mean(inner_elo))

            def try_new_poschan(p, c):
                """
                Moves the dataset cursor by *p* positions / *c* channels,
                skipping entries without data, and closes the current figure
                so perform_it() reopens with the new selection.

                :param p: position step (-1, 0 or 1)
                :param c: channel step (-1, 0 or 1)
                :return:
                """
                next_pos, next_chan = next_dataset
                if p == 1:
                    while (next_pos + p) not in acceptable_pos_chans and (next_pos + p) < highest_position:
                        p += 1
                elif p == -1:
                    while (next_pos + p) not in acceptable_pos_chans and (next_pos + p) > lowest_position:
                        p -= 1
                if (next_pos + p) not in acceptable_pos_chans:
                    print("Position does not exist")
                    return
                if p != 0:
                    # changed position: restart at its first acceptable channel
                    c = 0
                    next_chan = acceptable_pos_chans[next_pos + p][0]
                if c == 1:
                    while (next_chan + c) not in acceptable_pos_chans[next_pos + p] and \
                            (next_chan + c) < max(acceptable_pos_chans[next_pos + p]):
                        c += 1
                elif c == -1:
                    while (next_chan + c) not in acceptable_pos_chans[next_pos + p] and \
                            (next_chan + c) > min(acceptable_pos_chans[next_pos + p]):
                        c -= 1
                if (next_chan + c) not in acceptable_pos_chans[next_pos + p]:
                    print("Channel does not exist")
                    return
                next_dataset[0] = next_pos + p
                next_dataset[1] = next_chan + c
                plt.close()

            if event.key == 'h':
                show_help()
            elif event.key == 'p':
                show_stats()
            elif event.key == 'd':
                # remove the most recently added polyline pair
                lm.delete(*lm.line_segments[-2:])
                lm.draw(ax)
                show_stats()
                refresh()
            # n next position, m next channel
            elif event.key == 'n':
                try_new_poschan(1, 0)
            elif event.key == 'N':
                try_new_poschan(-1, 0)
            elif event.key == 'm':
                try_new_poschan(0, 1)
            elif event.key == 'M':
                try_new_poschan(0, -1)
            elif event.key == 'F':
                # cycle brightfield (-1) -> fluorescence 0..n-1 -> brightfield
                if axes_image._molyso_image_shown < fluorescence_count:
                    axes_image._molyso_image_shown += 1  # TODO: Test with more than one fluorescence channels
                    if axes_image._molyso_image_shown == fluorescence_count:
                        axes_image._molyso_image_shown = -1
                if axes_image._molyso_image_shown == -1:
                    axes_image.set_data(large_image)
                    # axes_image.autoscale()
                    axes_image.set_clim(vmin=large_image_min_max[0],
                                        vmax=large_image_min_max[1])
                else:
                    fluorescence_c = axes_image._molyso_image_shown
                    axes_image.set_data(large_fluorescences_image[fluorescence_c])
                    # axes_image.autoscale()
                    axes_image.set_clim(vmin=large_fluorescence_image_min_max[fluorescence_c][0],
                                        vmax=large_fluorescence_image_min_max[fluorescence_c][1])
                refresh()
            elif event.key == 'o' or event.key == 'O':
                # output
                recipient = None
                if event.key == 'O':
                    print("Please enter file name for tabular output [will be overwritten if exists]:")
                    file_name = input()
                    recipient = open(file_name, 'w+')
                out = QuickTableDumper(recipient=recipient)
                for key, t_env in all_envs.items():
                    # keys are repr((pos, chan)) strings; parse them back
                    t_pos, t_chan = map(int, key[1:-1].replace(' ', '').split(','))
                    x_pos = list(tracked_results.keys())[t_pos]
                    x_chan = list(tracked_results[x_pos].channel_accumulator.keys())[t_chan]
                    p_results = [ab[1] for ab in sorted(t_env['polyline_results'].items(), key=lambda ab: ab[0])]
                    inner_mu = [res['growth_rate'] for res in p_results]
                    mean_inner_mu = np.nanmean(inner_mu)
                    inner_elo = [res['average_elongation'] for res in p_results]
                    mean_inner_elo = np.nanmean(inner_elo)
                    for resultlet in p_results:
                        out.add({
                            'position': x_pos,
                            'channel': x_chan,
                            'growth_rate': resultlet['growth_rate'],
                            'growth_rate_channel_mean': mean_inner_mu,
                            'elongation_rate': resultlet['average_elongation'],
                            'elongation_rate_channel_mean': mean_inner_elo,
                            'division_age': resultlet['division_age'],
                            'growth_start': resultlet['growth_start'],
                            'growth_end': resultlet['growth_end'],
                        })
                if event.key == 'O':
                    recipient.close()
                    print("File written.")
            elif event.key == 'u' or event.key == 'U':
                # output
                recipient = None
                if event.key == 'U':
                    print("Please enter file name for tabular output [will be overwritten if exists]:")
                    file_name = input()
                    recipient = open(file_name, 'w+')
                out = QuickTableDumper(recipient=recipient)
                for key, t_env in all_envs.items():
                    t_pos, t_chan = map(int, key[1:-1].replace(' ', '').split(','))
                    x_pos = list(tracked_results.keys())[t_pos]
                    x_chan = list(tracked_results[x_pos].channel_accumulator.keys())[t_chan]
                    for line_num, rows_timestepwise in t_env['polyline_results_timestepwise'].items():
                        for row in rows_timestepwise:
                            result = row.copy()
                            result.update({
                                'position': x_pos,
                                'channel': x_chan,
                                'line_num': line_num,
                            })
                            out.add(result)
                if event.key == 'U':
                    recipient.close()
                    print("File written.")
            elif event.key == 'w':
                save_data()
            elif event.key == 'i':
                import code
                code.InteractiveConsole(locals=globals()).interact()
            elif event.key == 'q':
                raise SystemExit

        fig.canvas.mpl_connect('key_press_event', key_press)
        fig.canvas.mpl_connect('button_press_event', click)

        plt.show()

    # reopen the viewer forever; only the 'q' key (SystemExit) breaks out
    while True:
        perform_it()
| bsd-2-clause |
lbishal/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
    """Metric SMACOF reproduces the worked example from
    "Modern Multidimensional Scaling", Borg & Groenen, p. 154."""
    similarities = np.array([[0, 5, 3, 4],
                             [5, 0, 2, 2],
                             [3, 2, 0, 1],
                             [4, 2, 1, 0]])
    init = np.array([[-.266, -.539],
                     [.451, .252],
                     [.016, -.238],
                     [-.200, .524]])
    expected = np.array([[-1.415, -2.471],
                         [1.633, 1.107],
                         [.249, -.067],
                         [-.468, 1.431]])
    embedding, _ = mds.smacof(similarities, init=init, n_components=2,
                              max_iter=1, n_init=1)
    assert_array_almost_equal(embedding, expected, decimal=3)
def test_smacof_error():
    """smacof must reject malformed similarity matrices and bad inits."""
    # An asymmetric similarity matrix is rejected.
    asymmetric = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [3, 2, 0, 1],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, asymmetric)

    # A non-square similarity matrix is rejected.
    non_square = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, non_square)

    # An init whose shape does not match the similarities is rejected.
    symmetric = np.array([[0, 5, 3, 4],
                          [5, 0, 2, 2],
                          [3, 2, 0, 1],
                          [4, 2, 1, 0]])
    bad_init = np.array([[-.266, -.539],
                         [.016, -.238],
                         [-.200, .524]])
    assert_raises(ValueError, mds.smacof, symmetric, init=bad_init, n_init=1)
def test_MDS():
    """Non-metric MDS accepts and fits a precomputed dissimilarity matrix."""
    dissimilarities = np.array([[0, 5, 3, 4],
                                [5, 0, 2, 2],
                                [3, 2, 0, 1],
                                [4, 2, 1, 0]])
    model = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    model.fit(dissimilarities)
| bsd-3-clause |
lekshmideepu/nest-simulator | pynest/examples/one_neuron_with_noise.py | 8 | 3161 | # -*- coding: utf-8 -*-
#
# one_neuron_with_noise.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron with noise
---------------------
This script simulates a neuron with input from the ``poisson_generator``, and
records the neuron's membrane potential.
"""
###############################################################################
# First, we import all necessary modules needed to simulate, analyze and
# plot our example. Additionally, we set the verbosity to only show warnings
# and reset the kernel.
# Resetting the kernel removes any nodes we may have created previously and
# resets the internal clock to zero. This allows us to execute the script
# several times in a Python shell without interference from previous NEST
# simulations.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()

###############################################################################
# Second, the nodes (the neuron, poisson generator (two of them), and the
# voltmeter) are created using the ``Create`` function.
# We store the returned handles in variables for later reference.

neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")

###############################################################################
# Third, the Poisson generators are configured by assigning their ``rate``
# parameters directly. We set the rates to 80,000 Hz (excitatory drive) and
# 15,000 Hz (inhibitory drive), respectively. Note that we do not need to set
# parameters for the neuron and the voltmeter, since they have satisfactory
# defaults.

noise[0].rate = 80000.0
noise[1].rate = 15000.0

###############################################################################
# Fourth, the neuron is connected to the ``poisson_generator`` and to the
# ``voltmeter``. We also specify the synaptic weight and delay in this step:
# the first generator excites (+1.2), the second inhibits (-1.0).

nest.Connect(noise, neuron, syn_spec={'weight': [[1.2, -1.0]], 'delay': 1.0})
nest.Connect(voltmeter, neuron)

###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.

nest.Simulate(1000.0)

###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time.

nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
eshaw2/SoftwareSystems | HFC_ch5/thinkplot.py | 88 | 12565 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
    """Encapsulates a nice sequence of colors.

    Shades of blue that look good in color and can be distinguished
    in grayscale (up to a point).

    Borrowed from http://colorbrewer2.org/
    """
    # shared, class-level iterator over the active palette (None = inactive)
    color_iter = None

    colors = ['#081D58',
              '#253494',
              '#225EA8',
              '#1D91C0',
              '#41B6C4',
              '#7FCDBB',
              '#C7E9B4',
              '#EDF8B1',
              '#FFFFD9']

    # lists that indicate which colors to use depending on how many are used
    which_colors = [[],
                    [1],
                    [1, 3],
                    [0, 2, 4],
                    [0, 2, 4, 6],
                    [0, 2, 3, 5, 6],
                    [0, 2, 3, 4, 5, 6],
                    [0, 1, 2, 3, 4, 5, 6],
                    ]

    @classmethod
    def Colors(cls):
        """Returns the list of colors.
        """
        return cls.colors

    @classmethod
    def ColorGenerator(cls, n):
        """Returns an iterator of color strings.

        n: how many colors will be used
        """
        for i in cls.which_colors[n]:
            yield cls.colors[i]
        # NOTE(review): raising StopIteration inside a generator is a
        # Python-2 idiom; under PEP 479 (Python 3.7+) it becomes RuntimeError.
        raise StopIteration('Ran out of colors in Brewer.ColorGenerator')

    @classmethod
    def InitializeIter(cls, num):
        """Initializes the color iterator with the given number of colors."""
        cls.color_iter = cls.ColorGenerator(num)

    @classmethod
    def ClearIter(cls):
        """Sets the color iterator to None."""
        cls.color_iter = None

    @classmethod
    def GetIter(cls):
        """Gets the color iterator."""
        return cls.color_iter
def PrePlot(num=None, rows=1, cols=1):
    """Takes hints about what's coming.

    num: number of lines that will be plotted (seeds the Brewer palette)
    rows: number of subplot rows
    cols: number of subplot columns
    """
    if num:
        Brewer.InitializeIter(num)

    # TODO: get sharey and sharex working. probably means switching
    # to subplots instead of subplot.
    # also, get rid of the gray background.
    if rows > 1 or cols > 1:
        pyplot.subplots(rows, cols, sharey=True)
        # remember the layout in module-level globals for later SubPlot calls
        global SUBPLOT_ROWS, SUBPLOT_COLS
        SUBPLOT_ROWS = rows
        SUBPLOT_COLS = cols
def SubPlot(rows, cols, plot_number):
    """Configures the number of subplots and changes the current plot.

    rows: int
    cols: int
    plot_number: int, 1-based index of the subplot to activate
    """
    pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
    """A list-like object that answers every index with one stored value."""

    def __init__(self, val):
        """Remember *val* as the universal element.

        val: value to be stored
        """
        super(InfiniteList, self).__init__()
        self.val = val

    def __getitem__(self, index):
        """Return the stored value regardless of *index*.

        index: int (ignored)

        returns: the stored value
        """
        return self.val
def Underride(d, **options):
    """Add key-value pairs to d only if key is not in d.

    If d is None, create a new dictionary.

    d: dictionary (modified in place) or None
    options: keyword args to add to d

    Returns: d (the same object, or a new dict if d was None)
    """
    if d is None:
        d = {}

    # Use items() instead of the Python-2-only iteritems() so the module
    # keeps working under Python 3 as well (items() is valid in both).
    for key, val in options.items():
        d.setdefault(key, val)

    return d
def Clf():
    """Clears the figure and any hints that have been set."""
    Brewer.ClearIter()
    pyplot.clf()
def Figure(**options):
    """Sets options for the current figure.

    options: keyword args passed to pyplot.figure; figsize defaults to (6, 8)
    """
    Underride(options, figsize=(6, 8))
    pyplot.figure(**options)
def Plot(xs, ys, style='', **options):
    """Plots a line.

    Args:
      xs: sequence of x values
      ys: sequence of y values
      style: style string passed along to pyplot.plot
      options: keyword args passed to pyplot.plot
    """
    color_iter = Brewer.GetIter()

    if color_iter:
        try:
            # Python-2 iterator protocol (.next); consumes one palette color
            options = Underride(options, color=color_iter.next())
        except StopIteration:
            print 'Warning: Brewer ran out of colors.'
            Brewer.ClearIter()

    options = Underride(options, linewidth=3, alpha=0.8)
    pyplot.plot(xs, ys, style, **options)
def Scatter(xs, ys, **options):
    """Makes a scatter plot.

    xs: x values
    ys: y values
    options: options passed to pyplot.scatter
    """
    options = Underride(options, color='blue', alpha=0.2,
                        s=30, edgecolors='none')
    pyplot.scatter(xs, ys, **options)
def Pmf(pmf, **options):
    """Plots a Pmf or Hist as a line.

    Args:
      pmf: Hist or Pmf object (must provide Render() and a name attribute)
      options: keyword args passed to pyplot.plot
    """
    xs, ps = pmf.Render()
    if pmf.name:
        options = Underride(options, label=pmf.name)
    Plot(xs, ps, **options)
def Pmfs(pmfs, **options):
    """Plots a sequence of PMFs.

    Options are passed along for all PMFs.  If you want different
    options for each pmf, make multiple calls to Pmf.

    Args:
      pmfs: sequence of PMF objects
      options: keyword args passed to pyplot.plot
    """
    for pmf in pmfs:
        Pmf(pmf, **options)
def Hist(hist, **options):
    """Plots a Pmf or Hist with a bar plot.

    The default width of the bars is based on the minimum difference
    between values in the Hist.  If that's too small, you can override
    it by providing a width keyword argument, in the same units
    as the values.

    Args:
      hist: Hist or Pmf object
      options: keyword args passed to pyplot.bar
    """
    # find the minimum distance between adjacent values
    xs, fs = hist.Render()
    width = min(Diff(xs))

    if hist.name:
        options = Underride(options, label=hist.name)

    options = Underride(options,
                        align='center',
                        linewidth=0,
                        width=width)

    pyplot.bar(xs, fs, **options)
def Hists(hists, **options):
    """Plots two histograms as interleaved bar plots.

    Options are passed along for all PMFs.  If you want different
    options for each pmf, make multiple calls to Pmf.

    Args:
      hists: list of two Hist or Pmf objects
      options: keyword args passed to pyplot.plot
    """
    for hist in hists:
        Hist(hist, **options)
def Diff(t):
    """Compute the differences between adjacent elements in a sequence.

    Args:
      t: sequence of numbers

    Returns:
      list of differences (length one less than t; empty for 0 or 1 elements)
    """
    return [after - before for before, after in zip(t, t[1:])]
def Cdf(cdf, complement=False, transform=None, **options):
    """Plots a CDF as a line.

    Args:
      cdf: Cdf object
      complement: boolean, whether to plot the complementary CDF
      transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
      options: keyword args passed to pyplot.plot

    Returns:
      dictionary with the scale options that should be passed to
      Config, Show or Save.
    """
    xs, ps = cdf.Render()
    scale = dict(xscale='linear', yscale='linear')

    # callers may override the scales via keyword args; pop them out so
    # they are not forwarded to pyplot.plot
    for s in ['xscale', 'yscale']:
        if s in options:
            scale[s] = options.pop(s)

    if transform == 'exponential':
        complement = True
        scale['yscale'] = 'log'

    if transform == 'pareto':
        complement = True
        scale['yscale'] = 'log'
        scale['xscale'] = 'log'

    if complement:
        ps = [1.0-p for p in ps]

    if transform == 'weibull':
        # drop the last point (p == 1) to avoid log(0)
        xs.pop()
        ps.pop()
        ps = [-math.log(1.0-p) for p in ps]
        scale['xscale'] = 'log'
        scale['yscale'] = 'log'

    if transform == 'gumbel':
        # drop the first point (p == 0) to avoid log(0)
        xs.pop(0)
        ps.pop(0)
        ps = [-math.log(p) for p in ps]
        scale['yscale'] = 'log'

    if cdf.name:
        options = Underride(options, label=cdf.name)

    Plot(xs, ps, **options)
    return scale
def Cdfs(cdfs, complement=False, transform=None, **options):
    """Plots a sequence of CDFs.

    cdfs: sequence of CDF objects
    complement: boolean, whether to plot the complementary CDF
    transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
    options: keyword args passed to pyplot.plot
    """
    for cdf in cdfs:
        Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
    """Makes a contour plot.

    obj: map from (x, y) to z, or object that provides GetDict
    pcolor: boolean, whether to make a pseudocolor plot
    contour: boolean, whether to make a contour plot
    imshow: boolean, whether to use pyplot.imshow
    options: keyword args passed to pyplot.pcolor and/or pyplot.contour
    """
    # accept either a dict-like object or anything exposing GetDict()
    try:
        d = obj.GetDict()
    except AttributeError:
        d = obj

    Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)

    xs, ys = zip(*d.iterkeys())  # Python-2 dict API
    xs = sorted(set(xs))
    ys = sorted(set(ys))

    X, Y = np.meshgrid(xs, ys)
    # missing (x, y) pairs default to z = 0
    func = lambda x, y: d.get((x, y), 0)
    func = np.vectorize(func)
    Z = func(X, Y)

    x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    axes = pyplot.gca()
    axes.xaxis.set_major_formatter(x_formatter)

    if pcolor:
        pyplot.pcolormesh(X, Y, Z, **options)
    if contour:
        cs = pyplot.contour(X, Y, Z, **options)
        pyplot.clabel(cs, inline=1, fontsize=10)
    if imshow:
        extent = xs[0], xs[-1], ys[0], ys[-1]
        pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
    """Makes a pseudocolor plot.

    xs: sequence of x values
    ys: sequence of y values
    zs: 2-D grid of z values
    pcolor: boolean, whether to make a pseudocolor plot
    contour: boolean, whether to make a contour plot
    options: keyword args passed to pyplot.pcolor and/or pyplot.contour
    """
    Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)

    X, Y = np.meshgrid(xs, ys)
    Z = zs

    x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    axes = pyplot.gca()
    axes.xaxis.set_major_formatter(x_formatter)

    if pcolor:
        pyplot.pcolormesh(X, Y, Z, **options)
    if contour:
        cs = pyplot.contour(X, Y, Z, **options)
        pyplot.clabel(cs, inline=1, fontsize=10)
def Config(**options):
    """Configures the plot.

    Pulls options out of the option dictionary and passes them to
    the corresponding pyplot functions.
    """
    names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
             'xticks', 'yticks', 'axis']

    # dispatch each recognized option to the pyplot function of the same name
    for name in names:
        if name in options:
            getattr(pyplot, name)(options[name])

    loc = options.get('loc', 0)
    legend = options.get('legend', True)
    if legend:
        pyplot.legend(loc=loc)
def Show(**options):
    """Shows the plot.

    For options, see Config.

    options: keyword args used to invoke various pyplot functions
    """
    # TODO: figure out how to show more than one plot
    Config(**options)
    pyplot.show()
def Save(root=None, formats=None, **options):
    """Saves the plot in the given formats.

    For options, see Config.

    Args:
      root: string filename root
      formats: list of string formats (defaults to ['pdf', 'eps'])
      options: keyword args used to invoke various pyplot functions
    """
    Config(**options)

    if formats is None:
        formats = ['pdf', 'eps']

    if root:
        for fmt in formats:
            SaveFormat(root, fmt)
    # clear the figure so the next plot starts fresh
    Clf()
def SaveFormat(root, fmt='eps'):
    """Writes the current figure to a file in the given format.

    Args:
      root: string filename root
      fmt: string format (file extension)
    """
    filename = '%s.%s' % (root, fmt)
    print 'Writing', filename
    pyplot.savefig(filename, format=fmt, dpi=300)
# Provide aliases for calling the functions with lower-case names.
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
    """Smoke test: prints the 7-color Brewer palette."""
    color_iter = Brewer.ColorGenerator(7)
    for color in color_iter:
        print color


if __name__ == '__main__':
    main()
| gpl-3.0 |
racu10/emapy | src/emapy.py | 3 | 50258 |
# coding: utf-8
# In[1]:
#Useful emapy @racu10
import pandas as pd
import numpy as np
import overpy
import overpass
import folium
from osmapi import OsmApi
import math
import geopy
import geopy.distance
import time
import unicodedata
import sys;
reload(sys);
sys.setdefaultencoding("utf8")
MyApi = OsmApi()
apiOverPass = overpass.API()
apiOverPy = overpy.Overpass()
# In[2]:
def getDistance(long1,
                lat1,
                long2,
                lat2):
    """ getDistance(long1, lat1, long2, lat2)

    Haversine distance in metres between two long/lat coordinates (degrees).

    Parameters
    ----------
    long1 : float
        Longitude of the 1st coordinate.
    lat1 : float
        Latitude of the 1st coordinate.
    long2 : float
        Longitude of the 2nd coordinate.
    lat2 : float
        Latitude of the 2nd coordinate.

    Returns
    -------
    float
        Great-circle distance in metres (mean Earth radius).
    """
    earth_radius_m = 6371000  # mean Earth radius, metres
    deg2rad = math.pi / 180  # degrees -> radians conversion factor

    half_dlat = deg2rad * (lat2 - lat1) / 2
    half_dlong = deg2rad * (long2 - long1) / 2

    # Haversine formula
    a = (math.sin(half_dlat) ** 2
         + math.cos(deg2rad * lat1) * math.cos(deg2rad * lat2)
         * math.sin(half_dlong) ** 2)
    return 2 * earth_radius_m * math.asin(math.sqrt(a))
# In[3]:
def getDistanceInKm(long1,
                    lat1,
                    long2,
                    lat2):
    """ getDistanceInKm(long1, lat1, long2, lat2)

    Get distance between 2 coordinates in long/lat, in kilometres.

    Parameters
    ----------
    long1 : float
        Longitude 1st coordinate.
    lat1 : float
        Latitude 1st coordinate.
    long2 : float
        Longitude 2nd coordinate.
    lat2 : float
        Latitude 2nd coordinate.

    Returns
    -------
    float
        Geodesic distance in kilometres.
    """
    # BUGFIX: geopy.Point expects (latitude, longitude). The previous code
    # passed (longitude, latitude), which yields a wrong geodesic distance
    # and raises for any longitude outside [-90, 90].
    pt1 = geopy.Point(lat1, long1)
    pt2 = geopy.Point(lat2, long2)
    return geopy.distance.distance(pt1, pt2).km
# In[4]:
def getLessDistanceInKmBtwnCoordAndInfoStructureWithJumps(posX,
                                                          posY,
                                                          allData,
                                                          jump,
                                                          isInAllData = False):
    """ getLessDistanceInKmBtwnCoordAndInfoStructureWithJumps(posX, posY, allData, jump, isInAllData):

    Follow the chain of nearest neighbours starting at (posX, posY) for
    `jump` hops, returning the last hop's distance and data item.

    Parameters
    ----------
    posX : float
        Longitude coordinate to evaluate.
    posY : float
        Latitude coordinate to evaluate.
    allData : list of dict
        Items whose "geometry" entry holds [longitude, latitude]
        (presumably — TODO confirm axis order against the data source).
    jump : Integer
        Number of nearest-neighbour hops to perform.
    isInAllData : Boolean
        If the starting position is itself in allData and its own entry
        (and the current hop's entry) should be skipped.

    Returns
    -------
    list
        [distance_km, data_item] for the final hop; empty list if allData
        is empty or every candidate was skipped.
    """
    less = []
    tmpX = posX
    tmpY = posY
    for x in range(jump):
        actualLessDist = float("inf")
        for data in allData:
            d = float("inf")
            if isInAllData == True:
                # NOTE(review): a candidate is only considered when BOTH its
                # coordinates differ from the cursor AND the start point; a
                # point sharing just one coordinate is skipped too — looks
                # like the intent was per-point equality, verify with author.
                if tmpX != data["geometry"][0] and tmpY != data["geometry"][1] and posX != data["geometry"][0] and posY != data["geometry"][1]:
                    d = getDistanceInKm(tmpX, tmpY, data["geometry"][0], data["geometry"][1])
            else:
                d = getDistanceInKm(tmpX, tmpY, data["geometry"][0], data["geometry"][1])
            if d < actualLessDist:
                actualLessDist = d
                less = [actualLessDist,data]
        if len(allData) > 0:
            # move the cursor to the nearest neighbour found this hop
            tmpX = less[1]["geometry"][0]
            tmpY = less[1]["geometry"][1]
    return less
# In[5]:
def utmToLatLng(zone,
                easting,
                northing,
                northernHemisphere=True):
    """ utmToLatLng(zone, easting, northing, northernHemisphere=True)
    Transform a UTM location to (latitude, longitude) in degrees
    (WGS84 ellipsoid, standard series expansion).
    Parameters
    ----------
    zone : int
        UTM zone of the coordinates.
    easting : float
        Easting value (X), metres.
    northing : float
        Northing value (Y), metres.
    northernHemisphere : bool
        True when the coordinates lie in the northern hemisphere.
    Returns
    -------
    tuple (latitude, longitude)
        The equivalent geographic coordinates, in degrees.
    More info
    ---------
    See http://www.dmap.co.uk/utmworld.htm to locate your zone and the hemisphere.
    """
    if not northernHemisphere:
        northing = 10000000 - northing
    a = 6378137              # WGS84 semi-major axis (m)
    e = 0.081819191          # first eccentricity
    e1sq = 0.006739497       # second eccentricity squared
    k0 = 0.9996              # UTM scale factor at the central meridian
    arc = northing / k0
    mu = arc / (a * (1 - math.pow(e, 2) / 4.0 - 3 * math.pow(e, 4) / 64.0 - 5 * math.pow(e, 6) / 256.0))
    ei = (1 - math.pow((1 - e * e), (1 / 2.0))) / (1 + math.pow((1 - e * e), (1 / 2.0)))
    ca = 3 * ei / 2 - 27 * math.pow(ei, 3) / 32.0
    cb = 21 * math.pow(ei, 2) / 16 - 55 * math.pow(ei, 4) / 32
    cc = 151 * math.pow(ei, 3) / 96
    cd = 1097 * math.pow(ei, 4) / 512
    # Footpoint latitude.
    phi1 = mu + ca * math.sin(2 * mu) + cb * math.sin(4 * mu) + cc * math.sin(6 * mu) + cd * math.sin(8 * mu)
    n0 = a / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (1 / 2.0))
    r0 = a * (1 - e * e) / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (3 / 2.0))
    fact1 = n0 * math.tan(phi1) / r0
    _a1 = 500000 - easting   # offset from the 500 km false easting
    dd0 = _a1 / (n0 * k0)
    fact2 = dd0 * dd0 / 2
    t0 = math.pow(math.tan(phi1), 2)
    Q0 = e1sq * math.pow(math.cos(phi1), 2)
    fact3 = (5 + 3 * t0 + 10 * Q0 - 4 * Q0 * Q0 - 9 * e1sq) * math.pow(dd0, 4) / 24
    fact4 = (61 + 90 * t0 + 298 * Q0 + 45 * t0 * t0 - 252 * e1sq - 3 * Q0 * Q0) * math.pow(dd0, 6) / 720
    lof1 = _a1 / (n0 * k0)
    lof2 = (1 + 2 * t0 + Q0) * math.pow(dd0, 3) / 6.0
    lof3 = (5 - 2 * Q0 + 28 * t0 - 3 * math.pow(Q0, 2) + 8 * e1sq + 24 * math.pow(t0, 2)) * math.pow(dd0, 5) / 120
    _a2 = (lof1 - lof2 + lof3) / math.cos(phi1)
    _a3 = _a2 * 180 / math.pi
    latitude = 180 * (phi1 - fact1 * (fact2 + fact3 + fact4)) / math.pi
    if not northernHemisphere:
        latitude = -latitude
    # Central meridian of the zone is (6*zone - 183) degrees. This was
    # previously written with the fragile `cond and a or b` idiom, which
    # falls through to `b` whenever `a` evaluates to 0.
    centralMeridian = 6 * zone - 183.0 if zone > 0 else 3.0
    longitude = centralMeridian - _a3
    return (latitude, longitude)
# In[6]:
def getDataOfCsv(name, sep=';'):
    """ getDataOfCsv(name, sep=';')
    Load data of csv to pandas.
    Parameters
    ----------
    name : String
        Path + file.csv to load.
    sep : String
        Separator of the csv.
    Returns
    -------
    Pandas DataFrame
        The parsed CSV content.
    """
    # The import used to sit BEFORE the docstring, which turned the docstring
    # into a dead string literal; the docstring must be the first statement.
    import pandas as pd
    try:
        allData = pd.read_csv(name, encoding="utf8", sep=sep)
    except (UnicodeDecodeError, UnicodeError):
        # Retry with Latin-1 only on decoding failures; the previous bare
        # `except:` also hid genuine parse/IO errors behind a second attempt.
        allData = pd.read_csv(name, encoding="ISO-8859-1", sep=sep)
    return allData
# In[7]:
def is_number(s):
    """ is_number(s)
    Tell whether the given value can be interpreted as a number.
    Parameters
    ----------
    s : Object
        Value you want to test.
    Returns
    -------
    Boolean
        True when float() or unicodedata.numeric() accepts the value.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        # float(None) / float(object()) raise TypeError; previously that
        # escaped and crashed the caller instead of answering False.
        pass
    try:
        # Handles single numeric characters such as u'五' or u'٥'.
        import unicodedata
        unicodedata.numeric(s)
        return True
    except (TypeError, ValueError):
        pass
    return False
# In[8]:
def getPointOfStreet(streetName,
                     boundingBoxSearch):
    """ getPointOfStreet(streetName, boundingBoxSearch)
    Query Overpass for every way whose name matches `streetName`,
    restricted to the given bounding box.
    Parameters
    ----------
    streetName : String
        Name (regex) of the street to look up.
    boundingBoxSearch : tuple
        Bounding box coordinates that limit the search area.
    Returns
    -------
    OSM structure
        Raw Overpass response with all matching ways and their points.
    """
    bbox = str(boundingBoxSearch).encode("utf-8")
    query = 'way[name~"' + streetName + '"]' + bbox + ';'
    return overpass.API().Get(query)
# In[9]:
def getPointOfStreetPolygon(streetName,
                            polygon):
    """ getPointOfStreetPolygon(streetName, polygon)
    Query Overpass for every way whose name matches `streetName`,
    restricted to the given polygon.
    Parameters
    ----------
    streetName : String
        Name (regex) of the street to look up.
    polygon : tuple
        Polygon coordinates that limit the search area.
    Returns
    -------
    OSM structure
        Raw Overpass response with all matching ways and their points.
    """
    polyClause = polygonArrayToOSMstructure(polygon)
    query = 'way[name~"' + streetName + '"]' + polyClause + ';'
    return overpass.API().Get(query)
# In[10]:
def getAllStreetPointsLookingForName(allStreetInfoOSM,
                                     streetName):
    """ getAllStreetPointsLookingForName(allStreetInfoOSM, streetName)
    Collect the points of every LineString entry whose name contains
    `streetName` (case-insensitive), without duplicates.
    Parameters
    ----------
    allStreetInfoOSM : list of dictionary
        List with the OSM structure for each street.
    streetName : String
        Name fragment to match against each street's name.
    Returns
    -------
    List
        Unique points of all matching streets, in encounter order.
    """
    wanted = streetName.strip().lower()
    points = []
    for street in allStreetInfoOSM:
        if street['type'].strip().lower() != 'linestring':
            continue
        if wanted not in street['properties'][u'name'].strip().lower():
            continue
        for point in street['geometry']:
            if point not in points:
                points.append(point)
    return points
# In[11]:
def fromAllStreetsGetWithStreetNameTheLocationMostNear(allStreetInfoOSM, streetName, xtest, ytest):
    """ fromAllStreetsGetWithStreetNameTheLocationMostNear(allStreetInfoOSM, streetName, xtest, ytest)
    Snap (xtest, ytest) to the closest point of any street whose name
    contains `streetName`.
    Parameters
    ----------
    allStreetInfoOSM : list of dictionary
        List with the OSM structure for each street.
    streetName : String
        Name fragment to match against each street's name.
    xtest : float
        X coordinate to snap.
    ytest : float
        Y coordinate to snap.
    Returns
    -------
    tuple (x, y)
        Closest matching street point, or (xtest, ytest) when no
        street matches.
    """
    lstPoints = getAllStreetPointsLookingForName(allStreetInfoOSM, streetName)
    return fromPointsOfStretGetBestUbicationXY(lstPoints, xtest, ytest)
# In[12]:
def fromPointsOfStretGetBestUbicationXY(pointsOfStreet, xtest, ytest):
    """ fromPointsOfStretGetBestUbicationXY(pointsOfStreet, xtest, ytest)
    From a list of street points, return the one closest to the given
    position (haversine distance via getDistance).
    Parameters
    ----------
    pointsOfStreet : List
        List of (x, y) points.
    xtest : float
        Current x coordinate to be replaced.
    ytest : float
        Current y coordinate to be replaced.
    Returns
    -------
    tuple (x, y)
        Closest point, or (xtest, ytest) when the list is empty.
    """
    bestX, bestY = xtest, ytest
    bestDist = float('inf')
    for point in pointsOfStreet:
        candX, candY = point[0], point[1]
        dist = getDistance(xtest, ytest, candX, candY)
        if dist < bestDist:
            bestX, bestY = candX, candY
            bestDist = dist
    return bestX, bestY
# In[13]:
def fromPointsOfStretGetBestUbicationMinXYOSMStructure(pointsOfStreet,
                                                       xtest,
                                                       ytest):
    """ fromPointsOfStretGetBestUbicationMinXYOSMStructure(pointsOfStreet, xtest, ytest)
    From a raw OSM response, return the LineString coordinate closest
    to the given position (haversine distance via getDistance).
    Parameters
    ----------
    pointsOfStreet : dict
        OSM structure containing a 'features' list.
    xtest : float
        Current x coordinate to be replaced.
    ytest : float
        Current y coordinate to be replaced.
    Returns
    -------
    tuple (x, y)
        Closest point, or (xtest, ytest) when nothing matches.
    """
    bestX, bestY = xtest, ytest
    bestDist = float('inf')
    for feature in pointsOfStreet['features']:
        geometry = feature["geometry"]
        if geometry["type"].upper() != "LINESTRING":
            continue
        for coord in geometry["coordinates"]:
            # OSM LineString coordinates arrive as (lon, lat); swap here.
            candX, candY = coord[1], coord[0]
            dist = getDistance(xtest, ytest, candX, candY)
            if dist < bestDist:
                bestX, bestY = candX, candY
                bestDist = dist
    return bestX, bestY
# In[14]:
def pandasReadJson(url):
    """ pandasReadJson(url)
    Load a JSON document (URL or file path) into a pandas object.
    Parameters
    ----------
    url : String
        URL or path of the JSON document.
    Returns
    -------
    pandas structure
        The parsed JSON content.
    """
    from pandas import read_json
    return read_json(url)
# In[15]:
def getNowBikesInBcn():
    """ getNowBikesInBcn()
    Fetch the current snapshot of Barcelona's bike-share stations from
    the citybik.es API.
    Returns
    -------
    pandas structure
        All station data returned by the API.
    """
    return pandasReadJson('http://api.citybik.es/bicing.json')
# In[16]:
def decodeToUTF8(text):
    """ decodeToUTF8(text)
    Decode a byte string to UTF-8 text when possible; otherwise return
    the input unchanged.
    Parameters
    ----------
    text : String
        Text to be decoded to UTF-8.
    Returns
    -------
    String
        Decoded text, or the original value when decoding is not
        possible (already text, invalid UTF-8, or Python 3).
    """
    try:
        return unicode(text, 'utf-8')
    except (NameError, TypeError, UnicodeDecodeError):
        # NameError: Python 3 has no `unicode` builtin (str is already text).
        # TypeError: value is already unicode / not a byte string.
        # UnicodeDecodeError: bytes that are not valid UTF-8.
        # The previous bare `except:` also swallowed KeyboardInterrupt.
        return text
# In[17]:
def getAllBarrisBCNPoligonBox(path = 'alldata/barris.geojson',
                              columnName='neighbourhood',
                              orderedXY = False):
    """ getAllBarrisBCNPoligonBox(path, columnName, orderedXY)
    Load a geojson of neighbourhoods ("barris") and map each
    neighbourhood name to its polygon.
    Parameters
    ----------
    path : String
        Path of the geojson file.
    columnName : String
        Property key holding the neighbourhood name.
    orderedXY : Boolean
        When True, reorder each coordinate pair from (y, x) to (x, y).
    Returns
    -------
    Dictionary
        Keys are lowercase neighbourhood names, values are polygons.
    """
    neighbourhoods = dict()
    frame = pandasReadJson(path)
    for row in frame.iterrows():
        feature = dict(row[1][0])
        name = str(feature['properties'][columnName]).lower()
        coords = feature['geometry']['coordinates'][0][0]
        if orderedXY:
            coords = transformArrYXToXYList(coords)
        neighbourhoods[name] = coords
    return neighbourhoods
# In[18]:
def polygonArrayToOSMstructure(polygon):
    """ polygonArrayToOSMstructure(polygon)
    Build the Overpass `(poly:"...")` clause for a polygon.
    Parameters
    ----------
    polygon : Array
        Polygon as [[lat, long], [lat', long'], ...] -- i.e. (y, x)
        pairs, the ordering OSM returns.
    Returns
    -------
    String
        The `(poly:"x y x y ...")` clause for Overpass queries.
    """
    pieces = ['{0} {1} '.format(x, y) for y, x in polygon[:-1]]
    last = polygon[-1]
    return '(poly:"' + ''.join(pieces) + '{0} {1}")'.format(last[1], last[0])
# In[19]:
def getAllNodesIntoPolygon(polygon,
                           timeOut = 30):
    """ getAllNodesIntoPolygon(polygon, timeOut)
    With a polygon array, get all OSM nodes inside it.
    Parameters
    ----------
    polygon : Array
        Polygon as [[lat, long], [lat', long'], ...] -- i.e. (y, x)
        pairs, the ordering OSM returns.
    timeOut : Integer
        Seconds to wait before a single retry on failure.
    Returns
    -------
    OSM structure
        Overpass response with the nodes, or [] when the retry fails.
    """
    s = polygonArrayToOSMstructure(polygon)
    sql = """node""" + s + """;out;"""
    try:
        allData = apiOverPass.Get(sql)
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Typical cause here is the Overpass
        # rate limit, so retry once after the timeout.
        allData = getAllNodesIntoPolygonErrorTimeOut(sql, timeOut)
    return allData
# In[20]:
def getAllNodesIntoBoundingBox(boundingBoxSearch,
                               timeOut = 30):
    """ getAllNodesIntoBoundingBox(boundingBoxSearch, timeOut)
    With a bounding box, get all OSM nodes inside it.
    Parameters
    ----------
    boundingBoxSearch : tuple
        Bounding box coordinates that limit the search area.
    timeOut : Integer
        Seconds to wait before a single retry on failure.
    Returns
    -------
    OSM structure
        Overpass response with the nodes, or [] when the retry fails.
    """
    sql = """node""" + str(boundingBoxSearch).encode("utf-8") + """;out;"""
    try:
        allData = apiOverPass.Get(sql)
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Typical cause here is the Overpass
        # rate limit, so retry once after the timeout.
        allData = getAllNodesIntoPolygonErrorTimeOut(sql, timeOut)
    return allData
# In[21]:
def getAllNodesIntoPolygonErrorTimeOut(sql,
                                       wait):
    """ getAllNodesIntoPolygonErrorTimeOut(sql, wait)
    Retry an Overpass node query once after waiting `wait` seconds.
    Parameters
    ----------
    sql : String
        Query to get all node info.
    wait : Integer
        Seconds to wait before retrying; 0 disables the retry.
    Returns
    -------
    Dictionary
        Overpass response, or [] when the retry is disabled or fails.
    """
    if wait == 0:
        return []
    time.sleep(wait)
    try:
        return apiOverPass.Get(sql)
    except Exception:
        # The Python-2-only `print "Time Out"` statement made this module
        # unimportable on Python 3; the call form below works on both.
        # Also narrowed the bare `except:` to real errors only.
        print("Time Out")
        return []
# In[22]:
def getAmenityInfoIntoPolygon(polygon,
                              amenityType='pub',
                              timeOutWaitExcept = 30):
    """ getAmenityInfoIntoPolygon(polygon, amenityType='pub', timeOutWaitExcept = 30)
    With a polygon array, get all amenity nodes inside it.
    Parameters
    ----------
    polygon : Array
        Polygon as [[lat, long], [lat', long'], ...] -- i.e. (y, x)
        pairs, the ordering OSM returns.
    amenityType : String
        Tag name from amenity in OSM
        (see http://wiki.openstreetmap.org/wiki/Key:amenity).
    timeOutWaitExcept : Integer
        Seconds to wait before a single retry on failure.
    Returns
    -------
    Dictionary
        Overpass response with the amenity nodes, or [] on failure.
    """
    s = polygonArrayToOSMstructure(polygon)
    sql = "(node[amenity='" + amenityType + "']" + s + ";);out ;"
    try:
        allData = apiOverPass.Get(sql)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); retry once after the timeout.
        allData = getAllNodesIntoPolygonErrorTimeOut(sql, timeOutWaitExcept)
    return allData
# In[23]:
def getAmenityInfoIntoBoundingBox(boundingBoxSearch, amenityType='pub', timeOutWaitExcept = 30):
    """ getAmenityInfoIntoBoundingBox(boundingBoxSearch, amenityType='pub', timeOutWaitExcept = 30)
    With a bounding box, get all amenity nodes inside it.
    Parameters
    ----------
    boundingBoxSearch : Array
        Bounding box coordinates that limit the search area.
    amenityType : String
        Tag name from amenity in OSM
        (see http://wiki.openstreetmap.org/wiki/Key:amenity).
    timeOutWaitExcept : Integer
        Seconds to wait before a single retry on failure.
    Returns
    -------
    Dictionary
        Overpass response with the amenity nodes, or [] on failure.
    """
    sql = "(node[amenity='" + amenityType + "']" + str(boundingBoxSearch).encode("utf-8") + ";);out ;"
    try:
        allData = apiOverPass.Get(sql)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); retry once after the timeout.
        allData = getAllNodesIntoPolygonErrorTimeOut(sql, timeOutWaitExcept)
    return allData
# In[24]:
def getNodeInfo(idNode):
    """ getNodeInfo(idNode)
    Retrieve the full OSM record for a node.
    Parameters
    ----------
    idNode : Integer
        Node Id provided from OSM.
    Returns
    -------
    Dictionary
        All info stored in the OSM node.
    """
    return OsmApi().NodeGet(idNode)
# In[25]:
def getInfoOfOSMSearch(feature):
    """ getInfoOfOSMSearch(feature)
    Flatten a geojson 'features' list into a simpler structure for
    analysis, swapping every coordinate pair to (lat, lon)/(y, x).
    Parameters
    ----------
    feature : dict
        Geojson document containing a 'features' list.
    Returns
    -------
    List
        One dict per feature with keys 'geometry' (tuple for points,
        list of tuples otherwise), 'type' and 'properties'.
    """
    results = []
    for geo in feature["features"]:
        geom = geo["geometry"]
        coords = geom["coordinates"]
        entry = dict()
        if geom["type"].lower() == "point":
            entry["geometry"] = (coords[1], coords[0])
        else:
            entry["geometry"] = [(c[1], c[0]) for c in coords]
        entry["type"] = geom["type"]
        entry["properties"] = geo["properties"]
        results.append(entry)
    return results
# In[26]:
def coordInsidePolygon(x, y, polygon):
    """ coordInsidePolygon(x, y, polygon)
    Test whether a coordinate lies inside a polygon (ray-casting /
    even-odd rule).
    Parameters
    ----------
    polygon : Array
        Polygon as [[lat, long], [lat', long'], ...] -- i.e. (y, x)
        pairs, the ordering OSM returns.
    x : float
        Coord x
    y : float
        Coord y
    Returns
    -------
    Bool
        Returns true/false depending if it's inside or not.
    """
    n = len(polygon)
    inside = False
    if n > 0:
        # NOTE: vertices are unpacked as (y, x) pairs, matching the
        # module-wide OSM ordering.
        p1y, p1x = polygon[0]
        for i in range(1, n + 1):
            p2y, p2x = polygon[i % n]
            # Count crossings of a horizontal ray from (x, y) to the left.
            if y > min(p1y, p2y):
                if y <= max(p1y, p2y):
                    if x <= max(p1x, p2x):
                        if p1y != p2y:
                            # x of the edge/ray intersection.
                            xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                            if p1x == p2x or x <= xinters:
                                inside = not inside
            p1x, p1y = p2x, p2y
    return inside
# In[27]:
def getPerimeterOfDictWithPolygons(dictionary):
    """ getPerimeterOfDictWithPolygons(dictionary)
    Given a dictionary of polygons, attempt to build a single polygon
    with the outer perimeter. Marked "in process" by the original
    author -- the filtering heuristic below is kept as-is.
    Parameters
    ----------
    dictionary : dict
        Mapping of names to polygons (lists of coordinates).
    Returns
    -------
    List
        Candidate perimeter coordinates.
    """
    arrPoly = []
    for d in dictionary:
        poly = dictionary[d]
        for x in poly:
            if x not in arrPoly:
                arrPoly.append(x)
    tmpArr = []
    for x in arrPoly:
        if coordInsidePolygon(x[0], x[1], tmpArr) == False:
            tmpArr.append(x)
    T = tmpArr[:]
    cont = 0
    # NOTE(review): this loop pops from tmpArr while iterating it, which
    # skips elements -- preserved from the original "in process" code;
    # confirm the intended behaviour before relying on the result.
    for t in tmpArr:
        tmpArr.pop(cont)
        if coordInsidePolygon(t[0], t[1], T) == False:
            tmpArr.append(t)
        cont += 1
    # Bug fix: the Python-2-only `print x` statements made this module
    # unimportable on Python 3; the call form works on both.
    print(len(arrPoly))
    print(len(tmpArr))
    return tmpArr
# In[28]:
def transformArrYXToXY(arrYX):
    """ transformArrYXToXY(arrYX)
    Swap the two components of every position in the array.
    Parameters
    ----------
    arrYX : Array
        List of (y, x) positions to invert.
    Returns
    -------
    List
        The same positions as (x, y) tuples.
    """
    return [(point[1], point[0]) for point in arrYX]
def transformArrYXToXYList(arrYX):
    """ transformArrYXToXYList(arrYX)
    Swap the two components of every position in the array.
    Parameters
    ----------
    arrYX : Array
        List of (y, x) positions to invert.
    Returns
    -------
    List
        The same positions as [x, y] lists.
    """
    return [[point[1], point[0]] for point in arrYX]
# In[29]:
def removeIntInxString(txt, sep = '.'):
    """ removeIntInxString(txt, sep)
    Strip a leading index from text such as "1. Text what u need",
    returning "Text what u need".
    Parameters
    ----------
    txt : String
        Text to transform.
    sep : Char
        Separator between the index and the useful text.
    Returns
    -------
    String
        The text without the leading index; when no separator is
        present, the stripped input.
    """
    parts = txt.split(sep)
    if len(parts) <= 1:
        return txt.strip()
    # Re-join everything after the index, stripping each fragment.
    return sep.join(piece.strip() for piece in parts[1:])
# In[30]:
def createGeoJSON(features):# [[coord1,cord2,cord3,...], row, column]
""" createGeoJSON(features)
From structure as [[coord1,cord2,cord3,...], row, column]
creates a new geoJSON used for Surface Unit
Parameters
----------
features : List
Structure as [[coord1,cord2,cord3,...], row, column]
Returns
-------
String
Returns a great formated geojson
"""
gjson = '{"type":"FeatureCollection", '
gjson += '"features": ['
numFeat = len(features)
for x in range(numFeat):
feature = features[x]
#Feature
gjson += '{ "type":"Feature",'
gjson += '"geometry":{'
gjson += '"type": "MultiPolygon", "coordinates": [[['
isFirst = 0
firstLon = -9999
firstLat = -9999
for c in feature[0]:
lon = c[0]
lat = c[1]
if isFirst == 0:
firstLon = lon
firstLat = lat
isFirst = 1
gjson += '['
gjson += str(lat)
gjson += ', '
gjson += str(lon)
gjson += '],'
gjson += '['
gjson += str(firstLat)
gjson += ', '
gjson += str(firstLon)
gjson += ']'
gjson += ']]]'
gjson += "},"
gjson += '"properties": {'
gjson += '"id" :'
if(feature[1] > 0):
gjson += str(feature[1])
gjson += str(feature[2])
gjson += '}'
if x +1 == numFeat:
gjson +='}'
else:
gjson +='},'
#End Feature
gjson += ']'
gjson += '}'
return gjson
# In[31]:
def calculateIncOfDistance(posX1,
                           posY1,
                           posX2,
                           posY2,
                           incrementKm):
    """ calculateIncOfDistance(posX1, posY1, posX2, posY2, incrementKm)
    From two coordinates and the increment needed in km, compute how
    much to add to each coordinate to advance by that increment.
    Parameters
    ----------
    posX1 : float
        Longitude 1st coordinate.
    posY1 : float
        Latitude 1st coordinate.
    posX2 : float
        Longitude 2nd coordinate.
    posY2 : float
        Latitude 2nd coordinate.
    incrementKm : float
        Increment in km to advance.
    Returns
    -------
    tuple (incX, incY)
        Increments for the X and Y axes. Always a 2-tuple: the
        short-distance branch used to return a bare float, which
        crashed callers that index the result with [0]/[1].
    """
    d = getDistanceInKm(posX1, posY1, posX2, posY2)
    if d < incrementKm:
        # The whole span is shorter than one increment: one step covers it.
        return (posX2 - posX1, posY2 - posY1)
    steps = d * 1.0 / incrementKm
    return ((posX2 - posX1) / steps, (posY2 - posY1) / steps)
# In[32]:
def createFileWithText(fullPath, ext, text):
    """ createFileWithText(fullPath, ext, text)
    Create (or overwrite) the file ``fullPath + '.' + ext`` containing
    the given text.
    Parameters
    ----------
    fullPath : String
        Path + name of the file (without extension).
    ext : Char
        Extension of the file.
    text : String
        All text you want written into the file (encoded as UTF-8).
    Returns
    -------
    """
    import io
    with io.FileIO(fullPath + '.' + ext, "w") as handle:
        # io.FileIO only accepts bytes on Python 3; encode transparently.
        # On Python 2 `str` is already bytes and passes through unchanged.
        if isinstance(text, bytes):
            handle.write(text)
        else:
            handle.write(text.encode("utf-8"))
# In[33]:
def divideBoundingBoxBySurfaceUnitSavedGeoJSON(boundingBox,
                                               surfaceXKm,
                                               surfaceYKm,
                                               nameGeoJSON):
    """ divideBoundingBoxBySurfaceUnitSavedGeoJSON(boundingBox,
                                                   surfaceXKm,
                                                   surfaceYKm,
                                                   nameGeoJSON):
    Divide a bounding box into a grid of Surface Units and save the
    resulting polygons as a geojson file.
    Parameters
    ----------
    boundingBox : List
        Bounding box to divide, as [minLong, minLat, maxLong, maxLat].
    surfaceXKm : Float
        Distance in Km of X axis for each Surface Unit.
    surfaceYKm : Float
        Distance in Km of Y axis for each Surface Unit.
    nameGeoJSON : String
        Path + name of the geojson file to write.
    Returns
    -------
    String
        Returns the geojson as text.
    """
    minLat = boundingBox[1]
    minLong = boundingBox[0]
    maxLat = boundingBox[3]
    maxLong = boundingBox[2]
    # Degrees of longitude/latitude corresponding to one surface unit.
    incX = calculateIncOfDistance(minLong, minLat, maxLong, minLat, surfaceXKm)[0]
    incY = calculateIncOfDistance(minLong, minLat, minLong, maxLat, surfaceYKm)[1]
    col = 0
    row = 0
    T = []
    cont = 0
    # Corners of the current cell: pos1 = lower-left, pos2 = lower-right,
    # pos3 = upper-right, pos4 = upper-left.
    pos1X = minLong
    pos1Y = minLat
    pos2X = pos1X + incX
    pos2Y = pos1Y
    pos3X = pos2X
    pos3Y = pos2Y + incY
    pos4X = pos1X
    pos4Y = pos3Y
    # Outer loop advances one row (latitude band) per iteration.
    while pos1Y < maxLat:
        T.append([[[pos1X, pos1Y],[pos2X, pos2Y],[pos3X, pos3Y],[pos4X, pos4Y]], row, col])
        tmpY = pos2Y + incY
        # Inner loop slides the cell eastwards across the row.
        while pos1X < maxLong:
            pos1X = pos2X
            pos1Y = pos2Y
            pos2X = pos1X + incX
            pos2Y = pos1Y
            pos3X = pos2X
            pos3Y = pos2Y + incY
            pos4X = pos1X
            pos4Y = pos3Y
            col += 1
            T.append([[[pos1X, pos1Y],[pos2X, pos2Y],[pos3X, pos3Y],[pos4X, pos4Y]], row, col])
        # Reset to the west edge, one row further north.
        col = 0
        row += 1
        pos1X = minLong
        pos1Y = tmpY
        pos2X = pos1X + incX
        pos2Y = pos1Y
        pos3X = pos2X
        pos3Y = pos2Y + incY
        pos4X = pos1X
        pos4Y = pos3Y
    geoJson = createGeoJSON(T)
    createFileWithText(nameGeoJSON, 'geojson', geoJson)
    return geoJson
# In[34]:
def mapCreation(centerX,
                centerY):
    """ mapCreation(centerX, centerY)
    Create a new folium map centred on the given coordinate.
    Parameters
    ----------
    centerX : Float
        Longitude of the initial map centre.
    centerY : Float
        Latitude of the initial map centre.
    Returns
    -------
    Map
        The new folium map.
    """
    return folium.Map(location=[centerX, centerY])
# In[35]:
def mapAddMarker(map,
                 coordX,
                 coordY,
                 icn = 'glyphicon-certificate',
                 color = 'blue',
                 popuName = ''):
    """ mapAddMarker(map, coordX, coordY, icn, color, popuName)
    Add a marker to a folium map.
    Parameters
    ----------
    map : Map
        Map where the marker is added.
    coordX : Float
        Longitude of the marker.
    coordY : Float
        Latitude of the marker.
    icn : String
        Bootstrap glyphicon name.
    color : String
        Marker colour.
    popuName : String
        Text shown inside the popup.
    Returns
    -------
    """
    icon = folium.Icon(icon=icn, color=color)
    marker = folium.Marker([coordX, coordY], popup=popuName, icon=icon)
    marker.add_to(map)
# In[36]:
def mapAddLine(map,
               arrPoints,
               lineColor="#000000",
               weight=2.5,
               opacity=1):
    """ mapAddLine(map, arrPoints, lineColor, weight, opacity)
    Add a polyline to a folium map.
    Parameters
    ----------
    map : Map
        Map where the line is added.
    arrPoints : List
        Points that define the line.
    lineColor : String
        Line colour.
    weight : Float
        Line weight.
    opacity : Float
        Line opacity.
    Returns
    -------
    """
    line = folium.PolyLine(arrPoints, color=lineColor, weight=weight, opacity=opacity)
    line.add_to(map)
# In[37]:
def mapAddGeoJsonToMap(map,
                       pathGeoJson):
    """ mapAddGeoJsonToMap(map, pathGeoJson)
    Add a GeoJSON overlay to a folium map.
    Parameters
    ----------
    map : Map
        Map where the geojson overlay is added.
    pathGeoJson : String
        Path of the geojson file.
    Returns
    -------
    """
    # Bug fix: the file handle returned by open() was never closed.
    with open(pathGeoJson) as geojsonFile:
        folium.GeoJson(geojsonFile,
                       name='geojson'
                       ).add_to(map)
# In[38]:
def mapWithMarkerCluster(map,
                         name):
    """ mapWithMarkerCluster(map, name)
    Create a new marker cluster attached to a folium map.
    Parameters
    ----------
    map : Map
        Map the cluster is attached to.
    name : String
        Name of the cluster.
    Returns
    -------
    Cluster
        The new cluster, already added to the map.
    """
    cluster = folium.MarkerCluster(name)
    cluster.add_to(map)
    return cluster
def mapAddMarkerToCluster(cluster,
                          coordX,
                          coordY,
                          icn = 'glyphicon-certificate',
                          iconcolor = '#0000FF',
                          txtOfPoppup = "",
                          sizeX = 200,
                          sizeY = 50):
    """ mapAddMarkerToCluster(cluster, coordX, coordY, icn, iconcolor,
                              txtOfPoppup, sizeX, sizeY)
    Add a marker with an IFrame popup to a marker cluster.
    Parameters
    ----------
    cluster : Cluster
        Cluster where the marker is added.
    coordX : Float
        Longitude of the marker.
    coordY : Float
        Latitude of the marker.
    icn : String
        Bootstrap glyphicon name.
    iconcolor : String
        Marker colour.
    txtOfPoppup : String
        HTML shown inside the popup.
    sizeX : Int
        Width of the popup.
    sizeY : Int
        Height of the popup.
    Returns
    -------
    """
    frame = folium.element.IFrame(html=txtOfPoppup, width=sizeX, height=sizeY)
    popup = folium.Popup(html=frame)
    icon = folium.Icon(icon=icn, color=iconcolor)
    folium.Marker([coordX, coordY], icon=icon, popup=popup).add_to(cluster)
# In[39]:
def mapAddRegularPolygonMarker(map,
                               points,
                               color = '#0000FF',
                               txtOfPoppup = "",
                               sizeX = 200,
                               sizeY = 50):
    """ mapAddRegularPolygonMarker(map, points, color, txtOfPoppup,
                                   sizeX, sizeY)
    Add a regular polygon marker with an IFrame popup to a folium map.
    Parameters
    ----------
    map : Map
        Map where the marker is added.
    points : List
        Coordinates of the polygon marker.
    color : String
        Fill colour of the marker.
    txtOfPoppup : String
        HTML shown inside the popup.
    sizeX : Int
        Width of the popup.
    sizeY : Int
        Height of the popup.
    Returns
    -------
    """
    frame = folium.element.IFrame(html=txtOfPoppup, width=sizeX, height=sizeY)
    popup = folium.Popup(html=frame)
    marker = folium.RegularPolygonMarker(points, weight=2.5, opacity=1,
                                         fill_color=color, fill_opacity=1,
                                         popup=popup)
    marker.add_to(map)
# In[40]:
def mapAddStructureSimpleMarker(map,
                                allData,
                                icn = 'glyphicon-certificate',
                                color = 'blue',
                                popupPropertie = 'name'):
    """ mapAddStructureSimpleMarker(map, allData, icn, color, popupPropertie)
    Add one marker per unique point entry of an Info Structure list
    to a folium map.
    Parameters
    ----------
    map : Map
        Map where the markers are added.
    allData : Structure Info OSM
        List of Structure Info OSM entries.
    icn : String
        Bootstrap glyphicon name.
    color : String
        Colour for the markers.
    popupPropertie : String
        Property key whose value is shown inside each marker's popup.
    Returns
    -------
    """
    seen = []
    for entry in allData:
        if entry['type'].strip().lower() != 'point':
            continue
        props = entry['properties']
        label = ''
        if popupPropertie in props:
            label = props[popupPropertie]
        # De-duplicate by position + label.
        key = str(entry['geometry'][0]) + str(entry['geometry'][1]) + label
        if key in seen:
            continue
        seen.append(key)
        mapAddMarker(
            map,
            entry['geometry'][0],
            entry['geometry'][1],
            icn,
            color,
            label)
# In[41]:
def mapChoropleth(map,
                  geojsonPath = 'myGeoJSON.geojson',
                  pathKeyGeoJSON = 'feature.properties.cartodb_id',
                  pandasDataFrame = None,
                  columnKey = 'Key',
                  columData = 'Data',
                  fillColor = 'YlGn',
                  fillOpacity = 0.7,
                  lineOpacity = 0.3,
                  threshold_scale = [],
                  legendName = ''):
    """ mapChoropleth(map, geojsonPath, pathKeyGeoJSON, pandasDataFrame,
                      columnKey, columData, fillColor, fillOpacity,
                      lineOpacity, threshold_scale, legendName)
    Draw a choropleth layer on a folium map from a geojson and a
    dataframe.
    Parameters
    ----------
    map : Map
        Map where the choropleth is drawn.
    geojsonPath : String
        Path of the geojson file.
    pathKeyGeoJSON : String
        Path of the key inside the geojson features.
    pandasDataFrame : Pandas Dataframe
        Dataframe with the values to plot.
    columnKey : String
        Dataframe column matching the geojson key.
    columData : String
        Dataframe column whose values are shown on the map.
    fillColor : String
        Hex code, colour name, or a color brewer palette name:
        'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd',
        'RdPu', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'.
    fillOpacity : Float
        Fill opacity of the choropleth.
    lineOpacity : Float
        Line opacity of the choropleth.
    threshold_scale : List
        Floats dividing the colour scale (at most 6 entries).
    legendName : String
        Text of the legend.
    Returns
    -------
    """
    options = dict(
        geo_path=geojsonPath,
        key_on=pathKeyGeoJSON,
        data=pandasDataFrame,
        columns=[columnKey, columData],
        fill_color=fillColor,
        fill_opacity=fillOpacity,
        line_opacity=lineOpacity,
        threshold_scale=threshold_scale,
        legend_name=legendName)
    map.choropleth(**options)
# In[42]:
def mapSave(map,
            saveFileName = 'map.html'):
    """ mapSave(map, saveFileName)
    Save a folium map to an HTML file.
    Parameters
    ----------
    map : Map
        Map to save.
    saveFileName : String
        Path + name + .html where the map is written.
    Returns
    -------
    """
    map.save(saveFileName)
# In[43]:
def transformPandasToStructureInfo(pd,
                                   type = 'point',
                                   colPolygonOrX = 0,
                                   colY = 0,
                                   ifIsPolygonIsXY = True,
                                   isUTM = False,
                                   zoneUTM = 31,
                                   northernHemisphere = True
                                   ):
    """ transformPandasToStructureInfo(pd, type, colPolygonOrX, colY,
                                       ifIsPolygonIsXY, isUTM, zoneUTM,
                                       northernHemisphere)
    Convert a pandas dataframe into the module's Info Structure: one
    dict per row with "type", "geometry" and "properties" keys.
    Parameters
    ----------
    pd : Pandas Dataframe
        Pandas dataframe (parameter name shadows the usual pandas
        alias; it is the data, not the module).
    type : String
        Type of coordinates inside the dataframe: 'point' or
        'polygon' or ...
    colPolygonOrX : Integer / String
        Column Id holding the longitude (or the polygon).
    colY : Integer / String
        Column Id holding the latitude, when there is one.
    ifIsPolygonIsXY : Boolean
        When the rows hold polygons, are they already ordered
        (Longitude, Latitude)?
    isUTM : Boolean
        Are the coordinates represented in UTM?
    zoneUTM : Integer
        UTM zone, when coordinates are UTM.
    northernHemisphere : Boolean
        Are UTM coordinates in the northern hemisphere?
        NOTE(review): this flag is accepted but never forwarded to
        utmToLatLng below -- confirm whether that is intended.
    Returns
    -------
    List
        Info Structure entries built from the rows.
    """
    lst = []
    for d in pd[:].iterrows():
        # NOTE(review): rows whose second column equals -1 are skipped;
        # presumably -1 marks an invalid/missing coordinate -- confirm
        # against the data sources this is used with.
        if d[1][1] != -1:
            T = dict()
            tmp = dict()
            tmp = d[1]
            T["type"] = type
            if type.strip().lower() == 'point':
                x = tmp[colPolygonOrX]
                y = tmp[colY]
                # Values like "41,39" (decimal comma) are normalised to
                # floats before use.
                if is_number(tmp[colPolygonOrX]) == False:
                    x = float(tmp[colPolygonOrX].replace(',', '.'))
                if is_number(tmp[colY]) == False:
                    y = float(tmp[colY].replace(',', '.'))
                if isUTM == True:
                    x = float(x)
                    y = float(y)
                    x, y = utmToLatLng(zoneUTM, x, y)
                T["geometry"] = tuple([x,y])
            else:
                polyOrX = tmp[colPolygonOrX]
                if ifIsPolygonIsXY == False:
                    polyOrX = transformArrYXToXY(polyOrX)
                T["geometry"] = polyOrX
            T["properties"] = tmp
            lst.append(T)
    return lst
def getDatabase(name,
                extension,
                path = "" ,
                sep = "",
                isPolygon = False,
                colPolygonOrLong = 0,
                colLat = 1,
                columnName = '',
                ifIsPolygonIsXY = True,
                isUTM = False,
                zoneUTM = 31,
                northernHemisphere = True):
    """ getDatabase(name, extension, path, sep, isPolygon,
                    colPolygonOrLong, colLat, columnName,
                    ifIsPolygonIsXY, isUTM, zoneUTM, northernHemisphere)
    Load a data source and normalise it to [name, Info Structure] form.
    Parameters
    ----------
    name : String
        Name for the database.
    extension : String
        Type of data received: 'csv', 'json', 'geojson', 'bcnbikes',
        'df' or 'pandas'.
    path : String
        Path of the data source (for 'df'/'pandas' it is the
        dataframe itself).
    sep : String
        Separator for csv parsing, e.g. ';'.
    isPolygon : Boolean
        Is the data represented as polygons?
    colPolygonOrLong : Integer / String
        Column Id holding the longitude (or the polygon).
    colLat : Integer / String
        Column Id holding the latitude, when there is one.
    columnName : Integer / String
        Property key with the name (geojson sources only).
    ifIsPolygonIsXY : Boolean
        When rows hold polygons, are they ordered (Longitude, Latitude)?
    isUTM : Boolean
        Are the coordinates represented in UTM?
    zoneUTM : Integer
        UTM zone, when coordinates are UTM.
    northernHemisphere : Boolean
        Are UTM coordinates in the northern hemisphere?
    Returns
    -------
    list
        [name, Info Structure], or [] for an unknown extension.
    """
    name = name.strip().lower()
    ext = extension.strip().lower()
    geomType = 'polygon' if isPolygon else 'point'
    if ext == 'csv':
        allData = getDataOfCsv(path.strip().lower(), sep)
        return [name, transformPandasToStructureInfo(allData, geomType, colPolygonOrLong, colLat, ifIsPolygonIsXY, isUTM, zoneUTM, northernHemisphere)]
    elif ext == 'json':
        allData = pandasReadJson(path)
        return [name, transformPandasToStructureInfo(allData, geomType, colPolygonOrLong, colLat, ifIsPolygonIsXY, isUTM, zoneUTM, northernHemisphere)]
    elif ext == 'geojson':
        return [name, getAllBarrisBCNPoligonBox(path, columnName, True)]
    elif ext == 'bcnbikes':
        return [name, transformPandasToStructureInfo(getNowBikesInBcn(), 'point', 'lng', 'lat', True, True, 31)]
    elif ext == 'df' or ext == 'pandas':
        # Bug fix: the dataframe branches silently dropped
        # northernHemisphere instead of forwarding it.
        return [name, transformPandasToStructureInfo(path, geomType, colPolygonOrLong, colLat, ifIsPolygonIsXY, isUTM, zoneUTM, northernHemisphere)]
    return []
# In[44]:
def getDatabaseFromOSM(name,
                       type = 'amenity|node|way',
                       searchByPolygon = True,
                       ifIsPolygonIsXY = True,
                       boundingBoxOrPolygon = [],
                       keyWord = '',
                       timeOutWait = 30):
    """ getDatabaseFromOSM(name, type, searchByPolygon, ifIsPolygonIsXY,
                           boundingBoxOrPolygon, keyWord, timeOutWait)
    Query OSM (via Overpass) and normalise the result into the
    module's Info Structure form.
    Parameters
    ----------
    name : String
        Name for the database (currently informational only).
    type : String
        What to search: 'amenity', 'node' or 'way'.
    searchByPolygon : Boolean
        Search by polygon (True) or by bounding box (False)?
    ifIsPolygonIsXY : Boolean
        Is the polygon ordered (Longitude, Latitude)? If so it is
        reordered before querying.
    boundingBoxOrPolygon : List
        Polygon or bounding box limiting the OSM search.
    keyWord : String
        Search term, depending on `type`:
        Amenity: an amenity tag
        -> http://wiki.openstreetmap.org/wiki/Key:amenity
        Node: text to match in nodes
        Way: street name to match in ways
    timeOutWait : Integer
        Seconds to wait before retrying when Overpass rejects the
        query (too many requests).
    Returns
    -------
    Structure Info
        Info Structure entries for the matches; None for an unknown
        `type`.
    """
    if searchByPolygon and ifIsPolygonIsXY:
        boundingBoxOrPolygon = transformArrYXToXYList(boundingBoxOrPolygon)
    # Hoist the repeated `.strip().lower()`; also dropped the dead
    # `allData = []` and unused lowercased `name` locals.
    kind = type.strip().lower()
    if kind == 'amenity':
        if searchByPolygon:
            return getInfoOfOSMSearch(getAmenityInfoIntoPolygon(boundingBoxOrPolygon, keyWord, timeOutWait))
        return getInfoOfOSMSearch(getAmenityInfoIntoBoundingBox(boundingBoxOrPolygon, keyWord, timeOutWait))
    elif kind == 'node':
        if searchByPolygon:
            return getInfoOfOSMSearch(getAllNodesIntoPolygon(boundingBoxOrPolygon, timeOutWait))
        return getInfoOfOSMSearch(getAllNodesIntoBoundingBox(boundingBoxOrPolygon, timeOutWait))
    elif kind == 'way':
        if searchByPolygon:
            return getInfoOfOSMSearch(getPointOfStreetPolygon(keyWord, boundingBoxOrPolygon))
        return getInfoOfOSMSearch(getPointOfStreet(keyWord, boundingBoxOrPolygon))
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| bsd-3-clause |
SixTrack/SixTrackLib | examples/python/test_footprint_from_sixtrack_input/003_gen_coord_for_footprint.py | 1 | 2101 | import pickle
import pysixtrack
import numpy as np
import helpers as hp
import footprint
# Normalized emittances [m rad] used to convert to physical beam sizes.
epsn_x = 3.5e-6
epsn_y = 3.5e-6
# Footprint sampling grid: max radius (in sigmas) and number of radial /
# angular points.  NOTE(review): N_r_footp / N_theta_footp are floats used
# as counts; presumably footprint.initial_xy_polar casts them -- confirm.
r_max_sigma = 6.
N_r_footp = 10.
N_theta_footp = 10.
# Number of turns tracked to estimate the beta functions.
n_turns_beta = 150
# Load the lattice and the closed-orbit particle produced by earlier steps.
with open('line.pkl', 'rb') as fid:
    line = pysixtrack.Line.from_dict(pickle.load(fid))
with open('particle_on_CO.pkl', 'rb') as fid:
    partCO = pickle.load(fid)
part = pysixtrack.Particles(**partCO)
# Track a particle to get betas
# Small transverse offsets so the particle traces an ellipse in phase space.
part.x += 1e-5
part.y += 1e-5
x_tbt, px_tbt, y_tbt, py_tbt, sigma_tbt, delta_tbt = hp.track_particle_pysixtrack(
    line, part=part, Dx_wrt_CO_m=0., Dpx_wrt_CO_rad=0.,
    Dy_wrt_CO_m=0., Dpy_wrt_CO_rad=0.,
    Dsigma_wrt_CO_m=0., Ddelta_wrt_CO=0., n_turns=n_turns_beta, verbose=True)
# Fit the phase-space ellipses to extract beta functions and momentum cuts.
beta_x, x_max, px_cut = hp.betafun_from_ellip(x_tbt, px_tbt)
beta_y, y_max, py_cut = hp.betafun_from_ellip(y_tbt, py_tbt)
# Physical rms beam sizes: sigma = sqrt(beta * eps_geometric),
# with eps_geometric = epsn / (beta0 * gamma0).
sigmax = np.sqrt(beta_x * epsn_x / part.beta0 / part.gamma0)
sigmay = np.sqrt(beta_y * epsn_y / part.beta0 / part.gamma0)
# Polar grid of normalized initial amplitudes; angles avoid exactly 0 and
# pi/2 so both planes always get a non-zero kick.
xy_norm = footprint.initial_xy_polar(
    r_min=1e-2,
    r_max=r_max_sigma,
    r_N=N_r_footp + 1,
    theta_min=np.pi / 100,
    theta_max=np.pi / 2 - np.pi / 100,
    theta_N=N_theta_footp)
# Convert the normalized grid to momentum offsets w.r.t. the closed orbit:
# Dp = amplitude * sqrt(eps_geometric / beta).
DpxDpy_wrt_CO = np.zeros_like(xy_norm)
for ii in range(xy_norm.shape[0]):
    for jj in range(xy_norm.shape[1]):
        DpxDpy_wrt_CO[ii, jj, 0] = xy_norm[ii, jj, 0] * \
            np.sqrt(epsn_x / part.beta0 / part.gamma0 / beta_x)
        DpxDpy_wrt_CO[ii, jj, 1] = xy_norm[ii, jj, 1] * \
            np.sqrt(epsn_y / part.beta0 / part.gamma0 / beta_y)
# Persist the grid for the footprint-tracking step that follows.
with open('DpxDpy_for_footprint.pkl', 'wb') as fid:
    pickle.dump({
        'DpxDpy_wrt_CO': DpxDpy_wrt_CO,
        'xy_norm': xy_norm,
    }, fid)
# Diagnostic plots: turn-by-turn positions and the fitted ellipses with
# the extracted momentum cuts marked in red.
import matplotlib.pyplot as plt
plt.close('all')
fig1 = plt.figure(1)
spx = fig1.add_subplot(2, 1, 1)
spy = fig1.add_subplot(2, 1, 2, sharex=spx)
spx.plot(x_tbt)
spy.plot(y_tbt)
fig2 = plt.figure(2)
spex = fig2.add_subplot(2, 1, 1)
spey = fig2.add_subplot(2, 1, 2)
spex.plot(x_tbt, px_tbt, '.')
spey.plot(y_tbt, py_tbt, '.')
spex.plot(0, px_cut, 'xr')
spey.plot(0, py_cut, 'xr')
plt.show()
| lgpl-2.1 |
botswana-harvard/edc-rdb | bcpp_rdb/old/rdb/query.py | 1 | 1515 | import os
import pandas as pd
from datetime import datetime
from bcpp_rdb.private_settings import Rdb
from sqlalchemy import create_engine
engine = create_engine('postgresql://{user}:{password}@{host}/{db}'.format(
user=Rdb.user, password=Rdb.password, host=Rdb.host, db=Rdb.name))
class Pims(object):
    """Helper for PIMS warehouse data: export `dw` tables to CSV and
    load/filter the current-patient extract as a DataFrame."""

    def __init__(self):
        # Lazily-populated cache used by df_pims().
        self._df_pims = pd.DataFrame()

    def tables(self):
        """Return the names of the PIMS tables in the `dw` schema."""
        return [
            'dimcommonstudyparticipant',
            'dimcurrentpimspatient',
            # BUGFIX: a missing comma previously fused this name and the
            # next into a single string via implicit concatenation.
            'dimpimsappointmentvisit',
            'factpimshivtest',
            'factpimshaartinitiation',
        ]

    def import_pg_table(self, name):
        """Dump table `dw.<name>` from the research database to ~/<name>.csv."""
        with engine.connect() as conn, conn.begin():
            df = pd.read_sql_query('select * from dw.{}'.format(name), conn)
        df.to_csv(os.path.expanduser('~/{}.csv'.format(name)))

    def df_pims(self):
        """Return the PIMS patient extract, restricted to citizens /
        spouses of citizens aged 16-64 with a known date of birth,
        keeping only the latest registration per `idno`.

        The result is cached on the instance after the first call.
        """
        if self._df_pims.empty:
            df = pd.read_csv(
                os.path.expanduser('/Users/erikvw/Documents/bcpp/dimcurrentpimspatient.csv'),
                low_memory=False,
                # BUGFIX: `dob` must be a datetime for the subtraction
                # below; a raw CSV read leaves it as a string.
                parse_dates=['dob'])
            # Age in whole years from date of birth.
            df['age'] = (datetime.today() - df['dob']).astype('<m8[Y]')
            # Newest registration first, then keep one row per idno.
            df = df.sort_values(['idno', 'regdate'], ascending=[True, False])
            df = df.drop_duplicates('idno')
            df = df[(df['citizenship'].isin(['Citizen', 'Spouse of citizen'])) &
                    (df['age'] >= 16) &
                    (df['age'] <= 64) &
                    (pd.notnull(df['dob']))]
            self._df_pims = df
        return self._df_pims
| gpl-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tseries/tdi.py | 9 | 31432 | """ implement the TimedeltaIndex """
from datetime import timedelta
import numpy as np
from pandas.core.common import (ABCSeries, _TD_DTYPE, _INT64_DTYPE,
is_timedelta64_dtype, _maybe_box,
_values_from_object, isnull, is_integer, is_float)
from pandas.core.index import Index, Int64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.util.decorators import cache_readonly
from pandas.tseries.frequencies import to_offset
import pandas.core.common as com
from pandas.tseries import timedeltas
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.timedeltas import to_timedelta, _coerce_scalar_to_timedelta_type
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import Tick, DateOffset
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.index as _index
Timedelta = tslib.Timedelta
_resolution_map = {
'ns' : offsets.Nano,
'us' : offsets.Micro,
'ms' : offsets.Milli,
's' : offsets.Second,
'm' : offsets.Minute,
'h' : offsets.Hour,
'D' : offsets.Day,
}
def _td_index_cmp(opname, nat_result=False):
    """
    Wrap comparison operations to convert timedelta-like to timedelta64

    Parameters
    ----------
    opname : str
        Name of the comparison dunder to wrap (e.g. '__eq__').
    nat_result : bool
        Value assigned to positions involving NaT (True only for '__ne__').

    Returns
    -------
    wrapper : callable
        Bound comparison method for TimedeltaIndex.
    """
    def wrapper(self, other):
        # Delegate the raw comparison to the Int64Index implementation.
        func = getattr(super(TimedeltaIndex, self), opname)
        if _is_convertible_to_td(other):
            # Scalar path: compare against a single timedelta64 value.
            other = _to_m8(other)
            result = func(other)
            if com.isnull(other):
                # Comparing with NaT: every position gets the NaT result.
                result.fill(nat_result)
        else:
            # Array path: coerce to a TimedeltaIndex and compare elementwise.
            if not com.is_list_like(other):
                raise TypeError("cannot compare a TimedeltaIndex with type {0}".format(type(other)))
            other = TimedeltaIndex(other).values
            result = func(other)
            result = _values_from_object(result)
            # Positions where `other` is NaT are forced to nat_result.
            if isinstance(other, Index):
                o_mask = other.values.view('i8') == tslib.iNaT
            else:
                o_mask = other.view('i8') == tslib.iNaT
            if o_mask.any():
                result[o_mask] = nat_result
        # Positions where *self* is NaT are also forced to nat_result.
        mask = self.asi8 == tslib.iNaT
        if mask.any():
            result[mask] = nat_result
        # support of bool dtype indexers
        if com.is_bool_dtype(result):
            return result
        return Index(result)
    return wrapper
class TimedeltaIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit: unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional
which is an integer/float number
freq: a frequency for the index, optional
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
_datetimelike_ops = ['days','seconds','microseconds','nanoseconds',
'freq','components']
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
__lt__ = _td_index_cmp('__lt__')
__gt__ = _td_index_cmp('__gt__')
__le__ = _td_index_cmp('__le__')
__ge__ = _td_index_cmp('__ge__')
_engine_type = _index.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
freq = None
def __new__(cls, data=None, unit=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None,
closed=None, verify_integrity=True, **kwargs):
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
data = data.copy()
return data
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if np.isscalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# convert if not already
if getattr(data,'dtype',None) != _TD_DTYPE:
data = to_timedelta(data,unit=unit,box=False)
elif copy:
data = np.array(data,copy=True)
# check that we are matching freqs
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(index[0], None, len(index), name, freq)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed timedeltas does not '
'conform to passed frequency {1}'.format(inferred, freq.freqstr))
index.freq = freq
return index
if freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred:
index.freq = to_offset(inferred)
return index
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
index = _generate_regular_range(start, end, periods, offset)
index = cls._simple_new(index, name=name, freq=offset)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not getattr(values,'dtype',None):
values = np.array(values,copy=False)
if values.dtype == np.object_:
values = tslib.array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = com._ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
_na_value = tslib.NaT
"""The expected NA value to use with this index."""
@property
def _formatter_func(self):
from pandas.core.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
name = self.name
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is index
name = com._maybe_match_name(self, delta)
else:
raise ValueError("cannot add the type {0} to a TimedeltaIndex".format(type(delta)))
result = TimedeltaIndex(new_values, freq='infer', name=name)
return result
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
if opstr in ['__div__','__truediv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isnull(other):
raise NotImplementedError("division by pd.NaT not implemented")
i8 = self.asi8
result = i8/float(other.value)
result = self._maybe_mask_results(result,convert='float64')
return Index(result,name=self.name,copy=False)
return NotImplemented
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
other = Timestamp(other)
i8 = self.asi8
result = i8 + other.value
result = self._maybe_mask_results(result,fill_value=tslib.iNaT)
return DatetimeIndex(result,name=self.name,copy=False)
def _sub_datelike(self, other):
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.core.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
def _get_field(self, m):
values = self.asi8
hasnans = self.hasnans
if hasnans:
result = np.empty(len(self), dtype='float64')
mask = values == tslib.iNaT
imask = ~mask
result.flat[imask] = np.array([ getattr(Timedelta(val),m) for val in values[imask] ])
result[mask] = np.nan
else:
result = np.array([ getattr(Timedelta(val),m) for val in values ],dtype='int64')
return result
@property
def days(self):
""" Number of days for each element. """
return self._get_field('days')
@property
def seconds(self):
""" Number of seconds (>= 0 and less than 1 day) for each element. """
return self._get_field('seconds')
@property
def microseconds(self):
""" Number of microseconds (>= 0 and less than 1 second) for each element. """
return self._get_field('microseconds')
@property
def nanoseconds(self):
""" Number of nanoseconds (>= 0 and less than 1 microsecond) for each element. """
return self._get_field('nanoseconds')
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
if isnull(x):
return [np.nan]*len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([ f(x) for x in self ])
result.columns = columns
if not hasnans:
result = result.astype('int64')
return result
def total_seconds(self):
"""
Total duration of each element expressed in seconds.
.. versionadded:: 0.17.0
"""
return self._maybe_mask_results(1e-9*self.asi8)
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pytimedelta(self.asi8)
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
elif dtype == _INT64_DTYPE:
return self.asi8.copy()
elif dtype == _TD_DTYPE:
return self
elif dtype.kind == 'm':
# return an index (essentially this is division)
result = self.values.astype(dtype)
if self.hasnans:
return Index(self._maybe_mask_results(result,convert='float64'),name=self.name)
return Index(result.astype('i8'),name=self.name)
else: # pragma: no cover
raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
def union(self, other):
"""
Specialized union for TimedeltaIndex objects. If combine
overlapping ranges with the same DateOffset, will be much
faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
return Index(com._concat_compat(to_concat), name=name)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq
and self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = com._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _possibly_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return _maybe_box(self, Index.get_value(self, series, key), series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None
Returns
-------
label : object
"""
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + _resolution_map[parsed.resolution]() -
Timedelta(1, 'ns'))
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice',label)
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
if is_integer(key) or is_float(key):
self._invalid_indexer('slice',key)
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
parsed = _coerce_scalar_to_timedelta_type(key, box=True)
is_monotonic = self.is_monotonic
# figure out the resolution of the passed td
# and round to it
reso = parsed.resolution
t1 = parsed.round(reso)
t2 = t1 + _resolution_map[reso]() - Timedelta(1,'ns')
stamps = self.asi8
if is_monotonic:
# we are out of range
if len(stamps) and (
(use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
(use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
# try to find a the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def searchsorted(self, key, side='left'):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_TD_DTYPE, copy=False)
else:
key = _to_m8(key)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def dtype(self):
return _TD_DTYPE
@property
def is_all_dates(self):
return True
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'timedelta64'):
try:
other = TimedeltaIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except:
pass
freq = None
if isinstance(item, (Timedelta, tslib.NaTType)):
# check freq can be preserved on edge cases
if self.freq is not None:
if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item,compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError("cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
Make a new DatetimeIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if com.is_list_like(loc):
loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
    """Return True if conversion of `other` to a TimedeltaIndex is worth
    attempting: either it already is one, or it is a non-empty array-like
    whose inferred type is not purely numeric/mixed."""
    if isinstance(other, TimedeltaIndex):
        return True
    numeric_like = ('floating', 'mixed-integer', 'integer',
                    'mixed-integer-float', 'mixed')
    return len(other) > 0 and other.inferred_type not in numeric_like
def _is_convertible_to_td(key):
    """Return True if `key` is a timedelta-like scalar (offset, stdlib
    timedelta, Timedelta, numpy timedelta64) or a parseable string."""
    timedelta_like = (DateOffset, timedelta, Timedelta,
                      np.timedelta64, compat.string_types)
    return isinstance(key, timedelta_like)
def _to_m8(key):
    '''
    Convert a timedelta-like scalar (or string) to a numpy
    timedelta64 ('m8[ns]') scalar suitable for comparisons.
    '''
    if not isinstance(key, Timedelta):
        # this also converts strings
        key = Timedelta(key)
    # reinterpret the ns integer payload as an m8[ns] value that can be
    # compared against the index's underlying data
    return np.int64(key.value).view(_TD_DTYPE)
def _generate_regular_range(start, end, periods, offset):
    """Build a TimedeltaIndex of evenly spaced values.

    Exactly two of (start, end, periods) determine the range; `offset`
    supplies the step as a fixed number of nanoseconds (so only tick-like
    offsets are supported here).
    """
    stride = offset.nanos
    if periods is None:
        b = Timedelta(start).value
        e = Timedelta(end).value
        # round the exclusive right bound up to the next stride so that
        # `end` itself is included in the generated range
        e += stride - e % stride
    elif start is not None:
        b = Timedelta(start).value
        e = b + periods * stride
    elif end is not None:
        # anchor on `end` (inclusive) and count `periods` steps backwards
        e = Timedelta(end).value + stride
        b = e - periods * stride
    else:
        raise ValueError("at least 'start' or 'end' should be specified "
                         "if a 'period' is given.")
    data = np.arange(b, e, stride, dtype=np.int64)
    data = TimedeltaIndex._simple_new(data, None)
    return data
def timedelta_range(start=None, end=None, periods=None, freq='D',
                    name=None, closed=None):
    """
    Return a fixed frequency timedelta index, with day as the default
    frequency.

    Parameters
    ----------
    start : string or timedelta-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'D' (calendar daily)
        Frequency strings can have multiples, e.g. '5H'
    name : str, default None
        Name of the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Exactly 2 of start, end, or periods must be specified.

    Returns
    -------
    rng : TimedeltaIndex
    """
    # Thin convenience wrapper: all generation logic lives in the
    # TimedeltaIndex constructor.
    index = TimedeltaIndex(start=start, end=end,
                           periods=periods, freq=freq,
                           name=name, closed=closed)
    return index
| apache-2.0 |
grundgruen/powerline | tests/test_history.py | 2 | 15600 | from unittest import TestCase
import numpy as np
import pandas as pd
from zipline.history.history import HistorySpec
from zipline.protocol import BarData
from zipline.finance.trading import TradingEnvironment
from powerline.history.history_container import EpexHistoryContainer
__author__ = 'Max'
class TestHistory(TestCase):
"""
Testing the powerline history, mainly throught mock data. These data are
created in the create_* functions below the test_* functions.
"""
@classmethod
def setUpClass(cls):
"""
sets up the objects which can be used during all the tests. This avoids
reinitialising commonly used objects.
"""
start_date = pd.Timestamp('2015-07-06', tz='Europe/Berlin').\
tz_convert('UTC')
end_date = pd.Timestamp('2015-07-10', tz='Europe/Berlin').\
tz_convert('UTC')
cls.days = pd.date_range(start_date, end_date)
cls.env = TradingEnvironment()
cls.bar_count = 3
history_spec = HistorySpec(bar_count=cls.bar_count, frequency='1m',
field='price', ffill=False,
data_frequency='minute', env=cls.env)
cls.history_specs = {}
cls.history_specs[history_spec.key_str] = history_spec
cls.market_forms = ['epex_auction', 'intraday']
cls.hourly_products = ["%(a)02d-%(b)02d" % {'a': i, 'b': i + 1}
for i in range(24)]
cls.quarter_product_tags = ["Q" + str(i) for i in range(1, 5)]
def setUp(self):
"""
Initializing the history container for each individual test.
"""
self.container = EpexHistoryContainer(self.history_specs, None,
self.days[0], 'minute',
env=self.env)
def test_full_data_content(self):
"""
Testing the history with a full set of data over three days, i.e. the
contains values for all in 'epex_auction', 'intraday'
and for all appropriate time periods.
"""
data = self.create_full_data()
bar = BarData(data)
self.container.update(bar, self.days[-1])
history = self.container.get_history()
for market in self.market_forms:
self.assertLessEqual(len(history[market]), self.bar_count)
for current_sid in data:
current_data = data[current_sid]
self.assertEqual(history[current_data['market']]
[current_data['product']].ix[current_data['day']],
current_data['price'])
def test_sparse_data_content(self):
"""
Testing whether the entries in the history are correct, when created
from a small set of data, containing only 'epex_auction' prices for
one period over five days.
This test updates creates the history one data entry at a time. A batch
update is tested in another routine.
"""
data = self.create_sparse_data()
for current_sid in data:
current_data = data[current_sid]
bar = BarData({current_sid: current_data})
self.container.update(bar, self.days[-1])
history = self.container.get_history()
self.assertLessEqual(len(history[current_data['market']]),
self.bar_count)
self.assertEqual(history[current_data['market']]
[current_data['product']].ix[current_data['day']],
current_data['price'])
def test_sparse_data_content_batch_update(self):
    """
    Verify the history entries built from sparse data (only
    'epex_auction' prices for a single product over five days) when the
    container is filled by one single batch update.  The entry-by-entry
    variant is covered by a separate test.
    """
    entries = self.create_sparse_data()
    self.container.update(BarData(entries), self.days[-1])
    history = self.container.get_history()
    # The auction frame may hold at most `bar_count` days.
    self.assertLessEqual(len(history['epex_auction']), self.bar_count)
    retained_days = self.days[-3:]
    for entry in entries.values():
        # Days that fell out of the retained window carry no entry.
        if entry['day'] not in retained_days:
            continue
        frame = history[entry['market']]
        self.assertEqual(frame[entry['product']].ix[entry['day']],
                         entry['price'])
def test_sparse_data_final_dates(self):
    """
    Verify the dates kept in the history when it is built from sparse
    data ('epex_auction' prices for one product over five days), feeding
    the container one entry per update call.  A batch update is tested
    in another routine.
    """
    entries = self.create_sparse_data()
    for sid, entry in entries.items():
        self.container.update(BarData({sid: entry}), self.days[-1])
    history = self.container.get_history()
    # Only the trailing `bar_count` days of the range should survive.
    observed_dates = history['epex_auction'].index.tolist()
    expected_dates = self.days[-3:]
    for i in range(3):
        self.assertEqual(observed_dates[i].date(),
                         expected_dates[i].date())
def test_sparse_data_final_dates_batch_update(self):
    """
    Verify the dates kept in the history when it is built from sparse
    data ('epex_auction' prices for one product over five days) using a
    single batch update.  The entry-by-entry variant is tested in
    another routine.
    """
    entries = self.create_sparse_data()
    self.container.update(BarData(entries), self.days[-1])
    history = self.container.get_history()
    observed_dates = history['epex_auction'].index.tolist()
    # Only the trailing `bar_count` days of the range may remain.
    expected_dates = self.days[-3:]
    for i in range(3):
        self.assertEqual(observed_dates[i].date(),
                         expected_dates[i].date())
def test_dates_after_complex_update(self):
    """
    Testing dates in the history after an update with somewhat complicated
    data configurations.

    This test creates the history one data entry at a time. A batch
    update is tested in another routine.
    """
    data1, data2, test_data = self.create_data_for_complex_update()
    # Phase 1: feed prices for days 0, 2 and 4, one entry per update.
    for current_sid in data1:
        current_data = data1[current_sid]
        bar = BarData({current_sid: current_data})
        self.container.update(bar, self.days[-1])
    history = self.container.get_history()
    observed_dates = history['epex_auction'].index.tolist()
    # Only days 0, 2 and 4 were supplied, so exactly those are present.
    expected_dates = [self.days[i] for i in [0, 2, 4]]
    for i in range(3):
        self.assertEqual(observed_dates[i].date(),
                         expected_dates[i].date())
    # Phase 2: feed days 1, 2 and 3.  Afterwards only the three most
    # recent supplied days (2, 3, 4) remain: day 1 is too old and day 0
    # is pushed out of the window.
    for current_sid in data2:
        current_data = data2[current_sid]
        bar = BarData({current_sid: current_data})
        self.container.update(bar, self.days[-1])
    history = self.container.get_history()
    observed_dates = history['epex_auction'].index.tolist()
    expected_dates = [self.days[i] for i in [2, 3, 4]]
    for i in range(3):
        self.assertEqual(observed_dates[i].date(),
                         expected_dates[i].date())
    # Spot-check surviving prices from both phases.
    for sid in test_data:
        current_data = test_data[sid]
        current_date = current_data['day']
        current_product = current_data['product']
        observed_price = \
            history['epex_auction'][current_product][current_date]
        expected_price = current_data['price']
        self.assertEqual(observed_price, expected_price)
    # Cells that never received data must stay NaN.
    self.assertTrue(np.isnan(
        history['epex_auction']['00-01'][self.days[3]]))
    self.assertTrue(np.isnan(
        history['epex_auction']['02-03'][self.days[4]]))
def test_dates_after_complex_update_batch_update(self):
    """
    Testing dates in the history after an update with somewhat complicated
    data configurations.

    This test creates the history in one go. An entrywise creation
    is tested in another routine.
    """
    data1, data2, test_data = self.create_data_for_complex_update()
    # Phase 1: one batch containing prices for days 0, 2 and 4.
    bar = BarData(data1)
    self.container.update(bar, self.days[-1])
    history = self.container.get_history()
    observed_dates = history['epex_auction'].index.tolist()
    expected_dates = [self.days[i] for i in [0, 2, 4]]
    for i in range(3):
        self.assertEqual(observed_dates[i].date(),
                         expected_dates[i].date())
    # Phase 2: one batch with days 1, 2 and 3; afterwards only the three
    # most recent supplied days (2, 3, 4) remain.
    bar = BarData(data2)
    self.container.update(bar, self.days[-1])
    history = self.container.get_history()
    observed_dates = history['epex_auction'].index.tolist()
    expected_dates = [self.days[i] for i in [2, 3, 4]]
    for i in range(3):
        self.assertEqual(observed_dates[i].date(),
                         expected_dates[i].date())
    # Spot-check surviving prices from both batches.
    for sid in test_data:
        current_data = test_data[sid]
        current_date = current_data['day']
        current_product = current_data['product']
        observed_price = \
            history['epex_auction'][current_product][current_date]
        expected_price = current_data['price']
        self.assertEqual(observed_price, expected_price)
    # Cells that never received data must stay NaN.
    self.assertTrue(np.isnan(
        history['epex_auction']['00-01'][self.days[3]]))
    self.assertTrue(np.isnan(
        history['epex_auction']['02-03'][self.days[4]]))
def test_edge_cases(self):
    """
    Cases in which an update must leave the history untouched: an empty
    bar, and bars for markets ('cascade', 'auction_signal') that are not
    recorded.
    """
    # Snapshot the pristine (empty) history for later comparison.
    baseline = {market: frame.copy()
                for market, frame in self.container.get_history().items()}
    empty_data, unrecorded_data = self.create_edge_case_data()
    # An empty bar must not modify anything.
    self.container.update(BarData(empty_data), self.days[-1])
    history = self.container.get_history()
    for market in history.keys():
        self.assertTrue(history[market].equals(baseline[market]))
    # Bars for 'cascade' / 'auction_signal' markets are ignored as well.
    self.container.update(BarData(unrecorded_data), self.days[-1])
    history = self.container.get_history()
    for market in history.keys():
        self.assertTrue(history[market].equals(baseline[market]))
def create_full_data(self):
    """
    Create a set of complete data for three days. With all price types and
    for all periods.

    :return: A set of mock data, keyed by a running sid.
    """
    data = {}
    rolling_sid = 1
    for single_date in self.days[0:3]:
        # One 'epex_auction' entry per hourly product of the day.
        for current_hour in range(24):
            data[rolling_sid] = {
                'dt': self.days[-1],
                'price': np.random.uniform(0, 100),
                'market': self.market_forms[0],
                'product': self.hourly_products[current_hour],
                'day': single_date,
                'sid': rolling_sid
            }
            rolling_sid += 1
        # Quarter products for the remaining markets.
        # NOTE(review): `current_hour` here is the leftover loop variable
        # from the hourly loop above (always 23), so every quarter product
        # is tagged "23Q1".."23Q4".  If quarter products were meant per
        # hour, this loop should be nested inside the hourly one — confirm
        # intent.  Also `market_forms` has only two entries, so the [1:4]
        # slice yields just 'intraday'.
        for market in self.market_forms[1:4]:
            for current_quarter in self.quarter_product_tags:
                data[rolling_sid] = {
                    'dt': self.days[-1],
                    'price': np.random.uniform(0, 100),
                    'market': market,
                    'product': "%02d" % current_hour + current_quarter,
                    'day': single_date,
                    'sid': rolling_sid
                }
                rolling_sid += 1
    return data
def create_sparse_data(self):
    """
    Build a minimal mock data set: one 'epex_auction' price (for a
    randomly chosen hourly product) per day of the test range.

    :return: dict mapping a running sid (1-based) to the mock entry.
    """
    entries = {}
    for offset, single_date in enumerate(self.days):
        sid = offset + 1
        entries[sid] = {
            'dt': self.days[-1],
            'price': np.random.uniform(0, 100),
            'market': 'epex_auction',
            'product': self.hourly_products[np.random.randint(0, 24)],
            'day': single_date,
            'sid': sid,
        }
    return entries
def create_data_for_complex_update(self):
    """
    Create two sets of data where the second set updates upon the first one
    both by introducing a new date which lies within the recordable time
    frame.

    :return: list of [data1, data2, test_data] where test_data holds the
        entries (keyed by sid) expected to survive both updates.
    """
    # Batch 1 (sids 1-6): products '00-01' and '01-02' on days 0, 2, 4.
    data1 = {}
    rolling_sid = 1
    for i in [0, 2, 4]:
        data1[rolling_sid] = {
            'dt': self.days[-1],
            'price': np.random.uniform(0, 100),
            'market': 'epex_auction',
            'product': '00-01',
            'day': self.days[i],
            'sid': rolling_sid
        }
        rolling_sid += 1
        data1[rolling_sid] = {
            'dt': self.days[-1],
            'price': np.random.uniform(0, 100),
            'market': 'epex_auction',
            'product': '01-02',
            'day': self.days[i],
            'sid': rolling_sid
        }
        rolling_sid += 1
    # Batch 2 (sids 7-12): products '01-02' and '02-03' on days 1, 2, 3.
    # Day 1 lies before the final retained window; '01-02' on day 2
    # overwrites the batch-1 value.
    data2 = {}
    for i in [1, 2, 3]:
        data2[rolling_sid] = {
            'dt': self.days[-1],
            'price': np.random.uniform(0, 100),
            'market': 'epex_auction',
            'product': '01-02',
            'day': self.days[i],
            'sid': rolling_sid
        }
        rolling_sid += 1
        data2[rolling_sid] = {
            'dt': self.days[-1],
            'price': np.random.uniform(0, 100),
            'market': 'epex_auction',
            'product': '02-03',
            'day': self.days[i],
            'sid': rolling_sid
        }
        rolling_sid += 1
    # Expected survivors: sid 3 ('00-01', day 2) and sids 5/6 (day 4)
    # from batch 1, plus sids 9-12 (days 2 and 3) from batch 2.
    test_data = {}
    for i in [3, 5, 6]:
        test_data[i] = data1[i]
    for i in range(9, 13):
        test_data[i] = data2[i]
    return [data1, data2, test_data]
def create_edge_case_data(self):
    """
    Build two data sets that the history container must ignore: an
    empty one, and one whose markets ('cascade', 'auction_signal') are
    not tracked.

    :return: list of the two mock data sets
    """
    empty_set = {}
    ignored_set = {}
    for sid, market in enumerate(['cascade', 'auction_signal'], start=1):
        ignored_set[sid] = {
            'dt': self.days[-1],
            'price': np.random.uniform(0, 100),
            'market': market,
            'product': '00-01',
            'day': self.days[0],
            'sid': sid,
        }
    return [empty_set, ignored_set]
| apache-2.0 |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/metrics/tests/test_common.py | 31 | 41654 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curve are currently not covered by invariance test
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
# All regression metrics; each entry maps a display name to a callable
# metric(y_true, y_pred).
REGRESSION_METRICS = {
    "mean_absolute_error": mean_absolute_error,
    "mean_squared_error": mean_squared_error,
    "median_absolute_error": median_absolute_error,
    "explained_variance_score": explained_variance_score,
    # Variance-weighted so multioutput problems reduce to a single scalar.
    "r2_score": partial(r2_score, multioutput='variance_weighted'),
}
# All classification metrics that compare hard label predictions; each
# entry maps a display name to a callable metric(y_true, y_pred).
CLASSIFICATION_METRICS = {
    "accuracy_score": accuracy_score,
    "unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    "confusion_matrix": confusion_matrix,
    "hamming_loss": hamming_loss,
    "jaccard_similarity_score": jaccard_similarity_score,
    "unnormalized_jaccard_similarity_score":
    partial(jaccard_similarity_score, normalize=False),
    "zero_one_loss": zero_one_loss,
    "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),

    # These are needed to test averaging
    "precision_score": precision_score,
    "recall_score": recall_score,
    "f1_score": f1_score,
    "f2_score": partial(fbeta_score, beta=2),
    "f0.5_score": partial(fbeta_score, beta=0.5),
    "matthews_corrcoef_score": matthews_corrcoef,

    "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
    "weighted_f1_score": partial(f1_score, average="weighted"),
    "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
    "weighted_precision_score": partial(precision_score, average="weighted"),
    "weighted_recall_score": partial(recall_score, average="weighted"),

    "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
    "micro_f1_score": partial(f1_score, average="micro"),
    "micro_f2_score": partial(fbeta_score, average="micro", beta=2),
    "micro_precision_score": partial(precision_score, average="micro"),
    "micro_recall_score": partial(recall_score, average="micro"),

    "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
    "macro_f1_score": partial(f1_score, average="macro"),
    "macro_f2_score": partial(fbeta_score, average="macro", beta=2),
    "macro_precision_score": partial(precision_score, average="macro"),
    "macro_recall_score": partial(recall_score, average="macro"),

    "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
    "samples_f1_score": partial(f1_score, average="samples"),
    "samples_f2_score": partial(fbeta_score, average="samples", beta=2),
    "samples_precision_score": partial(precision_score, average="samples"),
    "samples_recall_score": partial(recall_score, average="samples"),

    "cohen_kappa_score": cohen_kappa_score,
}
# Classification metrics that consume continuous scores (probabilities or
# decision-function values) rather than hard label predictions.
THRESHOLDED_METRICS = {
    "coverage_error": coverage_error,
    "label_ranking_loss": label_ranking_loss,
    "log_loss": log_loss,
    "unnormalized_log_loss": partial(log_loss, normalize=False),

    "hinge_loss": hinge_loss,

    "brier_score_loss": brier_score_loss,

    "roc_auc_score": roc_auc_score,
    "weighted_roc_auc": partial(roc_auc_score, average="weighted"),
    "samples_roc_auc": partial(roc_auc_score, average="samples"),
    "micro_roc_auc": partial(roc_auc_score, average="micro"),
    "macro_roc_auc": partial(roc_auc_score, average="macro"),

    "average_precision_score": average_precision_score,
    "weighted_average_precision_score":
    partial(average_precision_score, average="weighted"),
    "samples_average_precision_score":
    partial(average_precision_score, average="samples"),
    "micro_average_precision_score":
    partial(average_precision_score, average="micro"),
    "macro_average_precision_score":
    partial(average_precision_score, average="macro"),
    "label_ranking_average_precision_score":
    label_ranking_average_precision_score,
}

# Union of every metric above; drives the generic invariance tests.
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
    "samples_f0.5_score",
    "samples_f1_score",
    "samples_f2_score",
    "samples_precision_score",
    "samples_recall_score",

    "coverage_error",

    "roc_auc_score",
    "micro_roc_auc",
    "weighted_roc_auc",
    "macro_roc_auc",
    "samples_roc_auc",

    "average_precision_score",
    "weighted_average_precision_score",
    "micro_average_precision_score",
    "macro_average_precision_score",
    "samples_average_precision_score",

    "label_ranking_loss",
    "label_ranking_average_precision_score",
]

# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
    "brier_score_loss",
    "matthews_corrcoef_score",
]

# Metric undefined with "binary" or "multiclass" input; tests iterating
# over ALL_METRICS skip these names for plain label data.
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
    set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]

# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
    "roc_auc_score", "average_precision_score",
]

# Metrics with a "pos_label" argument
# NOTE(review): "roc_curve" is listed here but is not a key of ALL_METRICS
# (curve functions are excluded above), so that entry is currently inert.
METRICS_WITH_POS_LABEL = [
    "roc_curve",

    "brier_score_loss",

    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",

    # pos_label support deprecated; to be removed in 0.18:
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",

    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",

    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
    "confusion_matrix",

    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",

    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",

    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",

    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",

    "cohen_kappa_score",
]

# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
    "accuracy_score",
    "jaccard_similarity_score",
    "zero_one_loss",
]

# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
    "log_loss",
    "unnormalized_log_loss",

    "roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
    "micro_roc_auc", "macro_roc_auc",

    "average_precision_score", "weighted_average_precision_score",
    "samples_average_precision_score", "micro_average_precision_score",
    "macro_average_precision_score",

    "coverage_error", "label_ranking_loss",
]

# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
    "accuracy_score", "unnormalized_accuracy_score",
    "hamming_loss",
    "jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
    "zero_one_loss", "unnormalized_zero_one_loss",

    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",

    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",

    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",

    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",

    "samples_f0.5_score", "samples_f1_score", "samples_f2_score",
    "samples_precision_score", "samples_recall_score",
]

# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
    "mean_absolute_error", "mean_squared_error", "r2_score",
    "explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
    "accuracy_score", "unnormalized_accuracy_score",
    "hamming_loss",
    "jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
    "zero_one_loss", "unnormalized_zero_one_loss",

    "f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",

    "matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
    "median_absolute_error",

    "cohen_kappa_score",
]

# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
    "explained_variance_score",
    "r2_score",
    "confusion_matrix",

    "precision_score", "recall_score", "f2_score", "f0.5_score",

    "weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
    "weighted_recall_score",

    "micro_f0.5_score", "micro_f2_score", "micro_precision_score",
    "micro_recall_score",

    "macro_f0.5_score", "macro_f2_score", "macro_precision_score",
    "macro_recall_score", "log_loss", "hinge_loss"
]

# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
    "cohen_kappa_score",
    "confusion_matrix",  # Left this one here because the tests in this file do
                         # not work for confusion_matrix, as its output is a
                         # matrix instead of a number. Testing of
                         # confusion_matrix with sample_weight is in
                         # test_classification.py
    "median_absolute_error",
]
@ignore_warnings
def test_symmetry():
    """Check each metric is (a)symmetric in (y_true, y_pred) as declared."""
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(20, ))
    y_pred = rng.randint(0, 2, size=(20, ))

    # Every metric must appear in exactly one of the two categories
    # (or be excluded as thresholded / undefined for this input).
    assert_equal(set(SYMMETRIC_METRICS).union(
        NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
        METRIC_UNDEFINED_BINARY_MULTICLASS), set(ALL_METRICS))
    assert_equal(
        set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
        set([]))

    # Declared-symmetric metrics must agree under argument swap ...
    for name in SYMMETRIC_METRICS:
        metric = ALL_METRICS[name]
        forward = metric(y_true, y_pred)
        backward = metric(y_pred, y_true)
        assert_almost_equal(forward, backward,
                            err_msg="%s is not symmetric" % name)

    # ... while declared-asymmetric metrics must differ somewhere.
    for name in NOT_SYMMETRIC_METRICS:
        metric = ALL_METRICS[name]
        assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
                    msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
    """Jointly shuffling samples must not change any metric value."""
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(20, ))
    y_pred = rng.randint(0, 2, size=(20, ))
    y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)

    for name, metric in ALL_METRICS.items():
        # These metrics cannot be evaluated on binary label data.
        if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
            continue
        original = metric(y_true, y_pred)
        shuffled = metric(y_true_shuffle, y_pred_shuffle)
        assert_almost_equal(original, shuffled,
                            err_msg="%s is not sample order invariant"
                            % name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
    """Jointly shuffling samples must not change multilabel and
    multioutput metric values."""
    random_state = check_random_state(0)

    # Generate some data
    y_true = random_state.randint(0, 2, size=(20, 25))
    y_pred = random_state.randint(0, 2, size=(20, 25))
    y_score = random_state.normal(size=y_true.shape)

    y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
                                                              y_pred,
                                                              y_score,
                                                              random_state=0)

    # Hard indicator predictions for the multilabel metrics.
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        assert_almost_equal(metric(y_true, y_pred),
                            metric(y_true_shuffle, y_pred_shuffle),
                            err_msg="%s is not sample order invariant"
                                    % name)

    # Continuous scores for the thresholded multilabel metrics.
    for name in THRESHOLDED_MULTILABEL_METRICS:
        metric = ALL_METRICS[name]
        assert_almost_equal(metric(y_true, y_score),
                            metric(y_true_shuffle, y_score_shuffle),
                            err_msg="%s is not sample order invariant"
                                    % name)

    # Multioutput regression metrics are checked with both the score and
    # the prediction arrays.
    for name in MULTIOUTPUT_METRICS:
        metric = ALL_METRICS[name]
        assert_almost_equal(metric(y_true, y_score),
                            metric(y_true_shuffle, y_score_shuffle),
                            err_msg="%s is not sample order invariant"
                                    % name)
        assert_almost_equal(metric(y_true, y_pred),
                            metric(y_true_shuffle, y_pred_shuffle),
                            err_msg="%s is not sample order invariant"
                                    % name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
    """Metrics must yield identical results for list / 1d-array / column
    representations of the same labels, and reject ambiguous mixes with
    row vectors."""
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))

    y1_list = list(y1)
    y2_list = list(y2)

    y1_1d, y2_1d = np.array(y1), np.array(y2)
    assert_equal(y1_1d.ndim, 1)
    assert_equal(y2_1d.ndim, 1)
    # Column vectors (n, 1) and row vectors (1, n) of the same labels.
    y1_column = np.reshape(y1_1d, (-1, 1))
    y2_column = np.reshape(y2_1d, (-1, 1))
    y1_row = np.reshape(y1_1d, (1, -1))
    y2_row = np.reshape(y2_1d, (1, -1))

    for name, metric in ALL_METRICS.items():
        if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
            continue

        # Reference value computed on the raw arrays.
        measure = metric(y1, y2)

        assert_almost_equal(metric(y1_list, y2_list), measure,
                            err_msg="%s is not representation invariant "
                                    "with list" % name)

        assert_almost_equal(metric(y1_1d, y2_1d), measure,
                            err_msg="%s is not representation invariant "
                                    "with np-array-1d" % name)

        assert_almost_equal(metric(y1_column, y2_column), measure,
                            err_msg="%s is not representation invariant "
                                    "with np-array-column" % name)

        # Mix format support
        assert_almost_equal(metric(y1_1d, y2_list), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and list" % name)

        assert_almost_equal(metric(y1_list, y2_1d), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and list" % name)

        assert_almost_equal(metric(y1_1d, y2_column), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and np-array-column"
                                    % name)

        assert_almost_equal(metric(y1_column, y2_1d), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and np-array-column"
                                    % name)

        assert_almost_equal(metric(y1_list, y2_column), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix list and np-array-column"
                                    % name)

        assert_almost_equal(metric(y1_column, y2_list), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix list and np-array-column"
                                    % name)

        # These mix representations aren't allowed
        assert_raises(ValueError, metric, y1_1d, y2_row)
        assert_raises(ValueError, metric, y1_row, y2_1d)
        assert_raises(ValueError, metric, y1_list, y2_row)
        assert_raises(ValueError, metric, y1_row, y2_list)
        assert_raises(ValueError, metric, y1_column, y2_row)
        assert_raises(ValueError, metric, y1_row, y2_column)

        # NB: We do not test for y1_row, y2_row as these may be
        # interpreted as multilabel or multioutput data.
        if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
                         MULTILABELS_METRICS)):
            assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    """Metrics must give identical results whether class labels are
    integers, strings, or object-dtype strings."""
    # Ensure that classification metrics with string labels
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))

    # Map {0, 1} onto string class names.
    y1_str = np.array(["eggs", "spam"])[y1]
    y2_str = np.array(["eggs", "spam"])[y2]

    pos_label_str = "spam"
    labels_str = ["eggs", "spam"]

    for name, metric in CLASSIFICATION_METRICS.items():
        if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
            continue

        measure_with_number = metric(y1, y2)

        # Ugly, but handle case with a pos_label and label
        metric_str = metric
        if name in METRICS_WITH_POS_LABEL:
            metric_str = partial(metric_str, pos_label=pos_label_str)

        measure_with_str = metric_str(y1_str, y2_str)

        assert_array_equal(measure_with_number, measure_with_str,
                           err_msg="{0} failed string vs number invariance "
                                   "test".format(name))

        # Same check with object-dtype string arrays.
        measure_with_strobj = metric_str(y1_str.astype('O'),
                                         y2_str.astype('O'))
        assert_array_equal(measure_with_number, measure_with_strobj,
                           err_msg="{0} failed string object vs number "
                                   "invariance test".format(name))

        if name in METRICS_WITH_LABELS:
            metric_str = partial(metric_str, labels=labels_str)
            measure_with_str = metric_str(y1_str, y2_str)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number  "
                                       "invariance test".format(name))

            measure_with_strobj = metric_str(y1_str.astype('O'),
                                             y2_str.astype('O'))
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string vs number  "
                                       "invariance test".format(name))

    for name, metric in THRESHOLDED_METRICS.items():
        if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
                    "brier_score_loss"):
            # Ugly, but handle case with a pos_label and label
            metric_str = metric
            if name in METRICS_WITH_POS_LABEL:
                metric_str = partial(metric_str, pos_label=pos_label_str)

            # Only y_true is labeled; y2 stays a numeric score here.
            measure_with_number = metric(y1, y2)
            measure_with_str = metric_str(y1_str, y2)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number "
                                       "invariance test".format(name))

            measure_with_strobj = metric(y1_str.astype('O'), y2)
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string object vs number "
                                       "invariance test".format(name))
        else:
            # TODO those metrics doesn't support string label yet
            assert_raises(ValueError, metric, y1_str, y2)
            assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
    """Assert that the metric `name` accepts single-sample input.

    Non-regression test: scores should work with a single sample.
    This is important for leave-one-out cross validation.
    Score functions tested are those that formerly called np.squeeze,
    which turns an array of size 1 into a 0-d array (!).
    """
    metric = ALL_METRICS[name]

    # Exercise all four (truth, prediction) combinations; the only
    # requirement is that no exception is raised.
    for truth, prediction in product([0, 1], repeat=2):
        metric([truth], [prediction])
@ignore_warnings
def check_single_sample_multioutput(name):
    """Single-sample, two-output analogue of check_single_sample."""
    metric = ALL_METRICS[name]
    for combo in product([0, 1], repeat=4):
        truth = np.array([combo[:2]])
        prediction = np.array([combo[2:]])
        metric(truth, prediction)
def test_single_sample():
    """Yield single-sample checks for every metric that supports them."""
    for name in ALL_METRICS:
        # Thresholded metrics and metrics undefined for binary/multiclass
        # input are not always defined with one sample
        # or in multiclass classification.
        if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
                name in THRESHOLDED_METRICS):
            continue
        yield check_single_sample, name

    for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
        yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
    """A column-count mismatch between y_true and y_pred must raise."""
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0], [1, 0], [0, 0]])

    for name in MULTIOUTPUT_METRICS:
        assert_raises(ValueError, ALL_METRICS[name], y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
    """Jointly permuting output columns must not change regression metrics."""
    rng = check_random_state(0)
    y_true = rng.uniform(0, 2, size=(20, 5))
    y_pred = rng.uniform(0, 2, size=(20, 5))

    for name in MULTIOUTPUT_METRICS:
        metric = ALL_METRICS[name]
        baseline = metric(y_true, y_pred)

        # Try three random column permutations against the baseline.
        for _ in range(3):
            perm = rng.permutation(y_true.shape[1])
            assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
                                baseline,
                                err_msg="%s is not dimension shuffling "
                                "invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
    """Multilabel metrics must agree between dense and sparse indicator
    matrices."""
    # Generate some data
    n_classes = 4
    n_samples = 50

    _, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                           random_state=0, n_samples=n_samples,
                                           allow_unlabeled=True)
    _, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                           random_state=1, n_samples=n_samples,
                                           allow_unlabeled=True)

    # To make sure at least one empty label is present
    y1 += [0]*n_classes
    y2 += [0]*n_classes

    y1_sparse_indicator = sp.coo_matrix(y1)
    y2_sparse_indicator = sp.coo_matrix(y2)

    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]

        # XXX cruel hack to work with partial functions
        if isinstance(metric, partial):
            metric.__module__ = 'tmp'
            metric.__name__ = name

        measure = metric(y1, y2)

        # Check representation invariance
        assert_almost_equal(metric(y1_sparse_indicator,
                                   y2_sparse_indicator),
                            measure,
                            err_msg="%s failed representation invariance "
                                    "between dense and sparse indicator "
                                    "formats." % name)
def test_raise_value_error_multilabel_sequences():
    """The legacy multilabel-sequence format must be rejected with
    ValueError by every multilabel metric."""
    # NOTE(review): `(2)` below is just the int 2, not a one-tuple —
    # presumably harmless since every sequence must raise regardless, but
    # confirm whether `(2,)` was intended.
    multilabel_sequences = [
        [[0, 1]],
        [[1], [2], [0, 1]],
        [(), (2), (0, 1)],
        [[]],
        [()],
        np.array([[], [1, 2]], dtype='object')]

    for name, seq in product(MULTILABELS_METRICS, multilabel_sequences):
        assert_raises(ValueError, ALL_METRICS[name], seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
    """normalize=False must equal normalize=True times n_samples (binary)."""
    # Test in the binary case
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(n_samples, ))
    y_pred = random_state.randint(0, 2, size=(n_samples, ))

    for name in METRICS_WITH_NORMALIZE_OPTION:
        metrics = ALL_METRICS[name]
        measure = metrics(y_true, y_pred, normalize=True)
        # guard against a degenerate (all-zero) measure that would make the
        # equality below vacuous
        assert_greater(measure, 0,
                       msg="We failed to test correctly the normalize option")
        assert_almost_equal(metrics(y_true, y_pred, normalize=False)
                            / n_samples, measure)
# NOTE(review): "multiclasss" has a typo; renaming would change the collected
# test id, so it is kept as-is.
def test_normalize_option_multiclasss_classification():
    """normalize=False must equal normalize=True times n_samples (multiclass)."""
    # Test in the multiclass case
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 4, size=(20, ))
    y_pred = random_state.randint(0, 4, size=(20, ))
    n_samples = y_true.shape[0]

    for name in METRICS_WITH_NORMALIZE_OPTION:
        metrics = ALL_METRICS[name]
        measure = metrics(y_true, y_pred, normalize=True)
        assert_greater(measure, 0,
                       msg="We failed to test correctly the normalize option")
        assert_almost_equal(metrics(y_true, y_pred, normalize=False)
                            / n_samples, measure)
def test_normalize_option_multilabel_classification():
    """normalize=False must equal normalize=True times n_samples (multilabel)."""
    # Test in the multilabel case
    n_classes = 4
    n_samples = 100

    # for both random_state 0 and 1, y_true and y_pred has at least one
    # unlabelled entry
    _, y_true = make_multilabel_classification(n_features=1,
                                               n_classes=n_classes,
                                               random_state=0,
                                               allow_unlabeled=True,
                                               n_samples=n_samples)
    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_classes=n_classes,
                                               random_state=1,
                                               allow_unlabeled=True,
                                               n_samples=n_samples)

    # To make sure at least one empty label is present
    y_true += [0]*n_classes
    y_pred += [0]*n_classes

    for name in METRICS_WITH_NORMALIZE_OPTION:
        metrics = ALL_METRICS[name]
        measure = metrics(y_true, y_pred, normalize=True)
        assert_greater(measure, 0,
                       msg="We failed to test correctly the normalize option")
        assert_almost_equal(metrics(y_true, y_pred, normalize=False)
                            / n_samples, measure,
                            err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
                     is_multilabel):
    """Check every ``average`` option against its by-hand computation.

    ``average=None`` is compared to per-label scores, ``"micro"`` to the
    score of the raveled indicator matrices, ``"macro"`` to the unweighted
    label mean, ``"weighted"`` to the support-weighted mean and, for
    multilabel input only, ``"samples"`` to the per-row mean.  Unknown
    averaging strings must raise ValueError.
    """
    n_samples, n_classes = y_true_binarize.shape

    # No averaging
    label_measure = metric(y_true, y_pred, average=None)
    assert_array_almost_equal(label_measure,
                              [metric(y_true_binarize[:, i],
                                      y_pred_binarize[:, i])
                               for i in range(n_classes)])

    # Micro measure
    micro_measure = metric(y_true, y_pred, average="micro")
    assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
                                              y_pred_binarize.ravel()))

    # Macro measure
    macro_measure = metric(y_true, y_pred, average="macro")
    assert_almost_equal(macro_measure, np.mean(label_measure))

    # Weighted measure
    weights = np.sum(y_true_binarize, axis=0, dtype=int)

    if np.sum(weights) != 0:
        weighted_measure = metric(y_true, y_pred, average="weighted")
        assert_almost_equal(weighted_measure, np.average(label_measure,
                                                         weights=weights))
    else:
        # zero total support: the weighted average is defined to be 0
        weighted_measure = metric(y_true, y_pred, average="weighted")
        assert_almost_equal(weighted_measure, 0)

    # Sample measure
    if is_multilabel:
        sample_measure = metric(y_true, y_pred, average="samples")
        assert_almost_equal(sample_measure,
                            np.mean([metric(y_true_binarize[i],
                                            y_pred_binarize[i])
                                     for i in range(n_samples)]))

    assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
    assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
                    y_score):
    """Dispatch the averaging checks for the metric registered under *name*."""
    multilabel = type_of_target(y_true).startswith("multilabel")
    metric = ALL_METRICS[name]

    # Pick which prediction array the metric consumes: hard predictions for
    # plain averaging metrics, continuous scores (in both roles) for
    # thresholded ones.
    if name in METRICS_WITH_AVERAGING:
        predictions, binarized = y_pred, y_pred_binarize
    elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
        predictions, binarized = y_score, y_score
    else:
        raise ValueError("Metric is not recorded as having an average option")

    _check_averaging(metric, y_true, predictions, y_true_binarize,
                     binarized, multilabel)
def test_averaging_multiclass(n_samples=50, n_classes=3):
    """Yield averaging checks for every averaging metric on multiclass data."""
    random_state = check_random_state(0)
    y_true = random_state.randint(0, n_classes, size=(n_samples, ))
    y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
    y_score = random_state.uniform(size=(n_samples, n_classes))

    # binarize with the label set observed in y_true
    lb = LabelBinarizer().fit(y_true)
    y_true_binarize = lb.transform(y_true)
    y_pred_binarize = lb.transform(y_pred)

    for name in METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true_binarize, y_pred,
               y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
    """Yield averaging checks on multilabel-indicator data."""
    _, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                          random_state=5, n_samples=n_samples,
                                          allow_unlabeled=False)
    y_true = y[:20]
    y_pred = y[20:]
    y_score = check_random_state(0).normal(size=(20, n_classes))
    # indicator input is already binary, so it doubles as its own binarization
    y_true_binarize = y_true
    y_pred_binarize = y_pred

    for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true_binarize, y_pred,
               y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
    """Averaging must be well defined when no label is ever positive."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros((20, 3))
    y_score = np.zeros((20, 3))
    y_true_binarize = y_true
    y_pred_binarize = y_pred

    for name in METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true_binarize, y_pred,
               y_pred_binarize, y_score)

    # Test _average_binary_score for weight.sum() == 0
    binary_metric = (lambda y_true, y_score, average="macro":
                     _average_binary_score(
                         precision_score, y_true, y_score, average))
    _check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
                     y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
    """Exercise the averaging checks when every label is positive everywhere."""
    y_true = np.ones((20, 3))
    y_pred = np.ones((20, 3))
    y_score = np.ones((20, 3))

    # indicator input doubles as its own binarization
    for metric_name in METRICS_WITH_AVERAGING:
        yield (check_averaging, metric_name, y_true, y_true, y_pred,
               y_pred, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
    """Check the ``sample_weight`` contract of *metric* on targets y1/y2.

    Verified properties: unit weights equal no weights; non-uniform weights
    change the score; list and array weights agree; integer weights equal
    sample repetition; zero weights equal sample removal; (for normalized
    metrics) scaling all weights leaves the score unchanged; and a weight
    vector of the wrong length raises.
    """
    rng = np.random.RandomState(0)
    sample_weight = rng.randint(1, 10, size=len(y1))

    # check that unit weights gives the same score as no weight
    unweighted_score = metric(y1, y2, sample_weight=None)
    assert_almost_equal(
        unweighted_score,
        metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
        err_msg="For %s sample_weight=None is not equivalent to "
                "sample_weight=ones" % name)

    # check that the weighted and unweighted scores are unequal
    weighted_score = metric(y1, y2, sample_weight=sample_weight)
    assert_not_equal(
        unweighted_score, weighted_score,
        msg="Unweighted and weighted scores are unexpectedly "
            "equal (%f) for %s" % (weighted_score, name))

    # check that sample_weight can be a list
    weighted_score_list = metric(y1, y2,
                                 sample_weight=sample_weight.tolist())
    assert_almost_equal(
        weighted_score, weighted_score_list,
        err_msg=("Weighted scores for array and list "
                 "sample_weight input are not equal (%f != %f) for %s") % (
                     weighted_score, weighted_score_list, name))

    # check that integer weights is the same as repeated samples
    repeat_weighted_score = metric(
        np.repeat(y1, sample_weight, axis=0),
        np.repeat(y2, sample_weight, axis=0), sample_weight=None)
    assert_almost_equal(
        weighted_score, repeat_weighted_score,
        err_msg="Weighting %s is not equal to repeating samples" % name)

    # check that ignoring a fraction of the samples is equivalent to setting
    # the corresponding weights to zero
    sample_weight_subset = sample_weight[1::2]
    sample_weight_zeroed = np.copy(sample_weight)
    sample_weight_zeroed[::2] = 0
    y1_subset = y1[1::2]
    y2_subset = y2[1::2]
    weighted_score_subset = metric(y1_subset, y2_subset,
                                   sample_weight=sample_weight_subset)
    weighted_score_zeroed = metric(y1, y2,
                                   sample_weight=sample_weight_zeroed)
    assert_almost_equal(
        weighted_score_subset, weighted_score_zeroed,
        err_msg=("Zeroing weights does not give the same result as "
                 "removing the corresponding samples (%f != %f) for %s" %
                 (weighted_score_zeroed, weighted_score_subset, name)))

    if not name.startswith('unnormalized'):
        # check that the score is invariant under scaling of the weights by a
        # common factor
        for scaling in [2, 0.3]:
            assert_almost_equal(
                weighted_score,
                metric(y1, y2, sample_weight=sample_weight * scaling),
                err_msg="%s sample_weight is not invariant "
                        "under scaling" % name)

    # Check that if sample_weight.shape[0] != y_true.shape[0], it raised an
    # error
    assert_raises(Exception, metric, y1, y2,
                  sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
    """Yield sample-weight invariance checks for binary, multiclass and
    multilabel targets, skipping metrics that do not accept weights."""
    random_state = check_random_state(0)

    # binary
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(n_samples, ))
    y_pred = random_state.randint(0, 2, size=(n_samples, ))
    y_score = random_state.random_sample(size=(n_samples,))
    for name in ALL_METRICS:
        if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
                name in METRIC_UNDEFINED_BINARY):
            continue
        metric = ALL_METRICS[name]
        # thresholded metrics consume scores, the rest hard predictions
        if name in THRESHOLDED_METRICS:
            yield check_sample_weight_invariance, name, metric, y_true, y_score
        else:
            yield check_sample_weight_invariance, name, metric, y_true, y_pred

    # multiclass
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 5, size=(n_samples, ))
    y_pred = random_state.randint(0, 5, size=(n_samples, ))
    y_score = random_state.random_sample(size=(n_samples, 5))
    for name in ALL_METRICS:
        if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
                name in METRIC_UNDEFINED_BINARY_MULTICLASS):
            continue
        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield check_sample_weight_invariance, name, metric, y_true, y_score
        else:
            yield check_sample_weight_invariance, name, metric, y_true, y_pred

    # multilabel indicator
    _, ya = make_multilabel_classification(n_features=1, n_classes=20,
                                           random_state=0, n_samples=100,
                                           allow_unlabeled=False)
    _, yb = make_multilabel_classification(n_features=1, n_classes=20,
                                           random_state=1, n_samples=100,
                                           allow_unlabeled=False)
    y_true = np.vstack([ya, yb])
    y_pred = np.vstack([ya, ya])
    y_score = random_state.randint(1, 4, size=y_true.shape)

    for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
                 MULTIOUTPUT_METRICS):
        if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
            continue

        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_score)
        else:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_pred)
def test_no_averaging_labels():
    """Check the ``labels`` argument when ``average=None``.

    For both the multiclass and the multilabel-indicator case, passing a
    permuted ``labels`` array must reorder the per-label scores by the same
    permutation.
    """
    # test labels argument when not using averaging
    # in multi-class and multi-label cases
    y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
    y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
    y_true_multiclass = np.array([0, 1, 2])
    y_pred_multiclass = np.array([0, 2, 3])
    labels = np.array([3, 0, 1, 2])
    _, inverse_labels = np.unique(labels, return_inverse=True)

    for name in METRICS_WITH_AVERAGING:
        for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
                               [y_true_multilabel, y_pred_multilabel]]:
            # FIX: the previous guard used ``y_pred.shape[1] > 0`` which
            # raises IndexError for the 1-d multiclass predictions whenever a
            # non-multilabel metric reaches it.  Testing the dimensionality
            # instead skips 2-d indicator input for non-multilabel metrics
            # without ever indexing a missing axis.
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
                continue

            metric = ALL_METRICS[name]

            score_labels = metric(y_true, y_pred, labels=labels, average=None)
            score = metric(y_true, y_pred, average=None)
            assert_array_equal(score_labels, score[inverse_labels])
| unlicense |
TomAugspurger/pandas | pandas/tests/groupby/test_grouping.py | 1 | 35091 | """ test where we are determining what we are grouping, or getting groups """
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouping
# selection
# --------------------------------
class TestSelection:
    """Tests for selecting columns from a GroupBy object (``g[...]``)."""

    def test_select_bad_cols(self):
        # selecting a missing column must raise KeyError naming only the
        # missing column
        df = DataFrame([[1, 2]], columns=["A", "B"])
        g = df.groupby("A")
        with pytest.raises(KeyError, match="\"Columns not found: 'C'\""):
            g[["C"]]

        with pytest.raises(KeyError, match="^[^A]+$"):
            # A should not be referenced as a bad column...
            # will have to rethink regex if you change message!
            g[["A", "C"]]

    def test_groupby_duplicated_column_errormsg(self):
        # GH7511
        df = DataFrame(
            columns=["A", "B", "A", "C"], data=[range(4), range(2, 6), range(0, 8, 2)]
        )

        msg = "Grouper for 'A' not 1-dimensional"
        with pytest.raises(ValueError, match=msg):
            df.groupby("A")
        with pytest.raises(ValueError, match=msg):
            df.groupby(["A", "B"])

        grouped = df.groupby("B")
        c = grouped.count()
        assert c.columns.nlevels == 1
        assert c.columns.size == 3

    def test_column_select_via_attr(self, df):
        # attribute access (``.C``) must match item access (``["C"]``)
        result = df.groupby("A").C.sum()
        expected = df.groupby("A")["C"].sum()
        tm.assert_series_equal(result, expected)

        # a column named like a GroupBy method must not shadow the method
        df["mean"] = 1.5
        result = df.groupby("A").mean()
        expected = df.groupby("A").agg(np.mean)
        tm.assert_frame_equal(result, expected)

    def test_getitem_list_of_columns(self):
        df = DataFrame(
            {
                "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
                "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
                "C": np.random.randn(8),
                "D": np.random.randn(8),
                "E": np.random.randn(8),
            }
        )

        result = df.groupby("A")[["C", "D"]].mean()
        result2 = df.groupby("A")[df.columns[2:4]].mean()

        expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()

        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)

    def test_getitem_numeric_column_names(self):
        # GH #13731
        df = DataFrame(
            {
                0: list("abcd") * 2,
                2: np.random.randn(8),
                4: np.random.randn(8),
                6: np.random.randn(8),
            }
        )
        result = df.groupby(0)[df.columns[1:3]].mean()
        result2 = df.groupby(0)[[2, 4]].mean()

        expected = df.loc[:, [0, 2, 4]].groupby(0).mean()

        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)

        # per GH 23566 this should raise a FutureWarning
        with tm.assert_produces_warning(FutureWarning):
            df.groupby(0)[2, 4].mean()

    def test_getitem_single_list_of_columns(self, df):
        # per GH 23566 this should raise a FutureWarning
        with tm.assert_produces_warning(FutureWarning):
            df.groupby("A")["C", "D"].mean()

    def test_getitem_single_column(self):
        df = DataFrame(
            {
                "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
                "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
                "C": np.random.randn(8),
                "D": np.random.randn(8),
                "E": np.random.randn(8),
            }
        )

        result = df.groupby("A")["C"].mean()

        as_frame = df.loc[:, ["A", "C"]].groupby("A").mean()
        as_series = as_frame.iloc[:, 0]
        expected = as_series

        tm.assert_series_equal(result, expected)
# grouping
# --------------------------------
class TestGrouping:
    """Tests for how group keys are resolved: columns, index levels,
    ``pd.Grouper`` objects, dict mappings and callables."""

    def test_grouper_index_types(self):
        # related GH5375
        # groupby misbehaving when using a Floatlike index
        df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"))
        for index in [
            tm.makeFloatIndex,
            tm.makeStringIndex,
            tm.makeUnicodeIndex,
            tm.makeIntIndex,
            tm.makeDateIndex,
            tm.makePeriodIndex,
        ]:
            df.index = index(len(df))
            df.groupby(list("abcde")).apply(lambda x: x)

            df.index = list(reversed(df.index.tolist()))
            df.groupby(list("abcde")).apply(lambda x: x)

    def test_grouper_multilevel_freq(self):
        # GH 7885
        # with level and freq specified in a pd.Grouper
        from datetime import date, timedelta

        d0 = date.today() - timedelta(days=14)
        dates = date_range(d0, date.today())
        date_index = pd.MultiIndex.from_product([dates, dates], names=["foo", "bar"])
        df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)

        # Check string level
        expected = (
            df.reset_index()
            .groupby([pd.Grouper(key="foo", freq="W"), pd.Grouper(key="bar", freq="W")])
            .sum()
        )
        # reset index changes columns dtype to object
        expected.columns = pd.Index([0], dtype="int64")

        result = df.groupby(
            [pd.Grouper(level="foo", freq="W"), pd.Grouper(level="bar", freq="W")]
        ).sum()
        tm.assert_frame_equal(result, expected)

        # Check integer level
        result = df.groupby(
            [pd.Grouper(level=0, freq="W"), pd.Grouper(level=1, freq="W")]
        ).sum()
        tm.assert_frame_equal(result, expected)

    def test_grouper_creation_bug(self):
        # GH 8795
        df = DataFrame({"A": [0, 0, 1, 1, 2, 2], "B": [1, 2, 3, 4, 5, 6]})
        g = df.groupby("A")
        expected = g.sum()

        g = df.groupby(pd.Grouper(key="A"))
        result = g.sum()
        tm.assert_frame_equal(result, expected)

        result = g.apply(lambda x: x.sum())
        tm.assert_frame_equal(result, expected)

        g = df.groupby(pd.Grouper(key="A", axis=0))
        result = g.sum()
        tm.assert_frame_equal(result, expected)

        # GH14334
        # pd.Grouper(key=...) may be passed in a list
        df = DataFrame(
            {"A": [0, 0, 0, 1, 1, 1], "B": [1, 1, 2, 2, 3, 3], "C": [1, 2, 3, 4, 5, 6]}
        )
        # Group by single column
        expected = df.groupby("A").sum()
        g = df.groupby([pd.Grouper(key="A")])
        result = g.sum()
        tm.assert_frame_equal(result, expected)

        # Group by two columns
        # using a combination of strings and Grouper objects
        expected = df.groupby(["A", "B"]).sum()

        # Group with two Grouper objects
        g = df.groupby([pd.Grouper(key="A"), pd.Grouper(key="B")])
        result = g.sum()
        tm.assert_frame_equal(result, expected)

        # Group with a string and a Grouper object
        g = df.groupby(["A", pd.Grouper(key="B")])
        result = g.sum()
        tm.assert_frame_equal(result, expected)

        # Group with a Grouper object and a string
        g = df.groupby([pd.Grouper(key="A"), "B"])
        result = g.sum()
        tm.assert_frame_equal(result, expected)

        # GH8866
        s = Series(
            np.arange(8, dtype="int64"),
            index=pd.MultiIndex.from_product(
                [list("ab"), range(2), date_range("20130101", periods=2)],
                names=["one", "two", "three"],
            ),
        )
        result = s.groupby(pd.Grouper(level="three", freq="M")).sum()
        expected = Series(
            [28], index=Index([Timestamp("2013-01-31")], freq="M", name="three")
        )
        tm.assert_series_equal(result, expected)

        # just specifying a level breaks
        result = s.groupby(pd.Grouper(level="one")).sum()
        expected = s.groupby(level="one").sum()
        tm.assert_series_equal(result, expected)

    def test_grouper_column_and_index(self):
        # GH 14327

        # Grouping a multi-index frame by a column and an index level should
        # be equivalent to resetting the index and grouping by two columns
        idx = pd.MultiIndex.from_tuples(
            [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)]
        )
        idx.names = ["outer", "inner"]
        df_multi = pd.DataFrame(
            {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]},
            index=idx,
        )
        result = df_multi.groupby(["B", pd.Grouper(level="inner")]).mean()
        expected = df_multi.reset_index().groupby(["B", "inner"]).mean()
        tm.assert_frame_equal(result, expected)

        # Test the reverse grouping order
        result = df_multi.groupby([pd.Grouper(level="inner"), "B"]).mean()
        expected = df_multi.reset_index().groupby(["inner", "B"]).mean()
        tm.assert_frame_equal(result, expected)

        # Grouping a single-index frame by a column and the index should
        # be equivalent to resetting the index and grouping by two columns
        df_single = df_multi.reset_index("outer")
        result = df_single.groupby(["B", pd.Grouper(level="inner")]).mean()
        expected = df_single.reset_index().groupby(["B", "inner"]).mean()
        tm.assert_frame_equal(result, expected)

        # Test the reverse grouping order
        result = df_single.groupby([pd.Grouper(level="inner"), "B"]).mean()
        expected = df_single.reset_index().groupby(["inner", "B"]).mean()
        tm.assert_frame_equal(result, expected)

    def test_groupby_levels_and_columns(self):
        # GH9344, GH9049
        idx_names = ["x", "y"]
        idx = pd.MultiIndex.from_tuples(
            [(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names
        )
        df = pd.DataFrame(np.arange(12).reshape(-1, 3), index=idx)

        by_levels = df.groupby(level=idx_names).mean()
        # reset_index changes columns dtype to object
        by_columns = df.reset_index().groupby(idx_names).mean()

        tm.assert_frame_equal(by_levels, by_columns, check_column_type=False)

        by_columns.columns = pd.Index(by_columns.columns, dtype=np.int64)
        tm.assert_frame_equal(by_levels, by_columns)

    def test_groupby_categorical_index_and_columns(self, observed):
        # GH18432, adapted for GH25871
        columns = ["A", "B", "A", "B"]
        categories = ["B", "A"]
        data = np.array(
            [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int
        )
        cat_columns = CategoricalIndex(columns, categories=categories, ordered=True)
        df = DataFrame(data=data, columns=cat_columns)
        result = df.groupby(axis=1, level=0, observed=observed).sum()
        expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int)
        expected_columns = CategoricalIndex(
            categories, categories=categories, ordered=True
        )
        expected = DataFrame(data=expected_data, columns=expected_columns)
        tm.assert_frame_equal(result, expected)

        # test transposed version
        df = DataFrame(data.T, index=cat_columns)
        result = df.groupby(axis=0, level=0, observed=observed).sum()
        expected = DataFrame(data=expected_data.T, index=expected_columns)
        tm.assert_frame_equal(result, expected)

    def test_grouper_getting_correct_binner(self):
        # GH 10063
        # using a non-time-based grouper and a time-based grouper
        # and specifying levels
        df = DataFrame(
            {"A": 1},
            index=pd.MultiIndex.from_product(
                [list("ab"), date_range("20130101", periods=80)], names=["one", "two"]
            ),
        )
        result = df.groupby(
            [pd.Grouper(level="one"), pd.Grouper(level="two", freq="M")]
        ).sum()
        expected = DataFrame(
            {"A": [31, 28, 21, 31, 28, 21]},
            index=MultiIndex.from_product(
                [list("ab"), date_range("20130101", freq="M", periods=3)],
                names=["one", "two"],
            ),
        )
        tm.assert_frame_equal(result, expected)

    def test_grouper_iter(self, df):
        assert sorted(df.groupby("A").grouper) == ["bar", "foo"]

    def test_empty_groups(self, df):
        # see gh-1048
        with pytest.raises(ValueError, match="No group keys passed!"):
            df.groupby([])

    def test_groupby_grouper(self, df):
        # grouping by an existing Grouper object reuses its groupings
        grouped = df.groupby("A")

        result = df.groupby(grouped.grouper).mean()
        expected = grouped.mean()
        tm.assert_frame_equal(result, expected)

    def test_groupby_dict_mapping(self):
        # GH #679
        from pandas import Series

        s = Series({"T1": 5})
        result = s.groupby({"T1": "T2"}).agg(sum)
        expected = s.groupby(["T2"]).agg(sum)
        tm.assert_series_equal(result, expected)

        s = Series([1.0, 2.0, 3.0, 4.0], index=list("abcd"))
        mapping = {"a": 0, "b": 0, "c": 1, "d": 1}

        result = s.groupby(mapping).mean()
        result2 = s.groupby(mapping).agg(np.mean)
        expected = s.groupby([0, 0, 1, 1]).mean()
        expected2 = s.groupby([0, 0, 1, 1]).mean()
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result, result2)
        tm.assert_series_equal(result, expected2)

    def test_groupby_grouper_f_sanity_checked(self):
        dates = date_range("01-Jan-2013", periods=12, freq="MS")
        ts = Series(np.random.randn(12), index=dates)

        # GH3035
        # index.map is used to apply grouper to the index
        # if it fails on the elements, map tries it on the entire index as
        # a sequence. That can yield invalid results that cause trouble
        # down the line.
        # the surprise comes from using key[0:6] rather than str(key)[0:6]
        # when the elements are Timestamp.
        # the result is Index[0:6], very confusing.

        msg = r"Grouper result violates len\(labels\) == len\(data\)"
        with pytest.raises(AssertionError, match=msg):
            ts.groupby(lambda key: key[0:6])

    def test_grouping_error_on_multidim_input(self, df):
        msg = "Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional"
        with pytest.raises(ValueError, match=msg):
            Grouping(df.index, df[["A", "A"]])

    def test_multiindex_passthru(self):
        # GH 7997
        # regression from 0.14.1
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        df.columns = pd.MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])

        result = df.groupby(axis=1, level=[0, 1]).first()
        tm.assert_frame_equal(result, df)

    def test_multiindex_negative_level(self, mframe):
        # GH 13901
        result = mframe.groupby(level=-1).sum()
        expected = mframe.groupby(level="second").sum()
        tm.assert_frame_equal(result, expected)

        result = mframe.groupby(level=-2).sum()
        expected = mframe.groupby(level="first").sum()
        tm.assert_frame_equal(result, expected)

        result = mframe.groupby(level=[-2, -1]).sum()
        expected = mframe
        tm.assert_frame_equal(result, expected)

        result = mframe.groupby(level=[-1, "first"]).sum()
        expected = mframe.groupby(level=["second", "first"]).sum()
        tm.assert_frame_equal(result, expected)

    def test_multifunc_select_col_integer_cols(self, df):
        df.columns = np.arange(len(df.columns))

        # it works!
        df.groupby(1, as_index=False)[2].agg({"Q": np.mean})

    def test_multiindex_columns_empty_level(self):
        lst = [["count", "values"], ["to filter", ""]]
        midx = MultiIndex.from_tuples(lst)

        df = DataFrame([[1, "A"]], columns=midx)

        grouped = df.groupby("to filter").groups
        assert grouped["A"] == [0]

        grouped = df.groupby([("to filter", "")]).groups
        assert grouped["A"] == [0]

        df = DataFrame([[1, "A"], [2, "B"]], columns=midx)

        expected = df.groupby("to filter").groups
        result = df.groupby([("to filter", "")]).groups
        assert result == expected

        df = DataFrame([[1, "A"], [2, "A"]], columns=midx)

        expected = df.groupby("to filter").groups
        result = df.groupby([("to filter", "")]).groups
        tm.assert_dict_equal(result, expected)

    def test_groupby_multiindex_tuple(self):
        # GH 17979
        df = pd.DataFrame(
            [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
            columns=pd.MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),
        )
        expected = df.groupby([("b", 1)]).groups
        result = df.groupby(("b", 1)).groups
        tm.assert_dict_equal(expected, result)

        df2 = pd.DataFrame(
            df.values,
            columns=pd.MultiIndex.from_arrays(
                [["a", "b", "b", "c"], ["d", "d", "e", "e"]]
            ),
        )
        expected = df2.groupby([("b", "d")]).groups
        result = df.groupby(("b", 1)).groups
        tm.assert_dict_equal(expected, result)

        df3 = pd.DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"])
        expected = df3.groupby([("b", "d")]).groups
        result = df.groupby(("b", 1)).groups
        tm.assert_dict_equal(expected, result)

    @pytest.mark.parametrize("sort", [True, False])
    def test_groupby_level(self, sort, mframe, df):
        # GH 17537
        frame = mframe
        deleveled = frame.reset_index()

        result0 = frame.groupby(level=0, sort=sort).sum()
        result1 = frame.groupby(level=1, sort=sort).sum()

        expected0 = frame.groupby(deleveled["first"].values, sort=sort).sum()
        expected1 = frame.groupby(deleveled["second"].values, sort=sort).sum()

        expected0.index.name = "first"
        expected1.index.name = "second"

        assert result0.index.name == "first"
        assert result1.index.name == "second"

        tm.assert_frame_equal(result0, expected0)
        tm.assert_frame_equal(result1, expected1)
        assert result0.index.name == frame.index.names[0]
        assert result1.index.name == frame.index.names[1]

        # groupby level name
        result0 = frame.groupby(level="first", sort=sort).sum()
        result1 = frame.groupby(level="second", sort=sort).sum()
        tm.assert_frame_equal(result0, expected0)
        tm.assert_frame_equal(result1, expected1)

        # axis=1
        result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()
        result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum()
        tm.assert_frame_equal(result0, expected0.T)
        tm.assert_frame_equal(result1, expected1.T)

        # raise exception for non-MultiIndex
        msg = "level > 0 or level < -1 only valid with MultiIndex"
        with pytest.raises(ValueError, match=msg):
            df.groupby(level=1)

    def test_groupby_level_index_names(self, axis):
        # GH4014 this used to raise ValueError since 'exp'>1 (in py2)
        df = DataFrame({"exp": ["A"] * 3 + ["B"] * 3, "var1": range(6)}).set_index(
            "exp"
        )
        if axis in (1, "columns"):
            df = df.T
        df.groupby(level="exp", axis=axis)
        msg = f"level name foo is not the name of the {df._get_axis_name(axis)}"
        with pytest.raises(ValueError, match=msg):
            df.groupby(level="foo", axis=axis)

    @pytest.mark.parametrize("sort", [True, False])
    def test_groupby_level_with_nas(self, sort):
        # GH 17537
        index = MultiIndex(
            levels=[[1, 0], [0, 1, 2, 3]],
            codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
        )

        # factorizing doesn't confuse things
        s = Series(np.arange(8.0), index=index)
        result = s.groupby(level=0, sort=sort).sum()
        expected = Series([6.0, 22.0], index=[0, 1])
        tm.assert_series_equal(result, expected)

        index = MultiIndex(
            levels=[[1, 0], [0, 1, 2, 3]],
            codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
        )

        # factorizing doesn't confuse things
        s = Series(np.arange(8.0), index=index)
        result = s.groupby(level=0, sort=sort).sum()
        expected = Series([6.0, 18.0], index=[0.0, 1.0])
        tm.assert_series_equal(result, expected)

    def test_groupby_args(self, mframe):
        # PR8618 and issue 8015
        frame = mframe

        msg = "You have to supply one of 'by' and 'level'"
        with pytest.raises(TypeError, match=msg):
            frame.groupby()

        msg = "You have to supply one of 'by' and 'level'"
        with pytest.raises(TypeError, match=msg):
            frame.groupby(by=None, level=None)

    @pytest.mark.parametrize(
        "sort,labels",
        [
            [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
            [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]],
        ],
    )
    def test_level_preserve_order(self, sort, labels, mframe):
        # GH 17537
        grouped = mframe.groupby(level=0, sort=sort)
        exp_labels = np.array(labels, np.intp)
        tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels)

    def test_grouping_labels(self, mframe):
        grouped = mframe.groupby(mframe.index.get_level_values(0))
        exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)
        tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels)

    def test_list_grouper_with_nat(self):
        # GH 14715
        df = pd.DataFrame({"date": pd.date_range("1/1/2011", periods=365, freq="D")})
        df.iloc[-1] = pd.NaT
        grouper = pd.Grouper(key="date", freq="AS")

        # Grouper in a list grouping
        result = df.groupby([grouper])
        expected = {pd.Timestamp("2011-01-01"): pd.Index(list(range(364)))}
        tm.assert_dict_equal(result.groups, expected)

        # Test case without a list
        result = df.groupby(grouper)
        expected = {pd.Timestamp("2011-01-01"): 365}
        tm.assert_dict_equal(result.groups, expected)

    @pytest.mark.parametrize(
        "func,expected",
        [
            (
                "transform",
                pd.Series(name=2, dtype=np.float64, index=pd.RangeIndex(0, 0, 1)),
            ),
            (
                "agg",
                pd.Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),
            ),
            (
                "apply",
                pd.Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),
            ),
        ],
    )
    def test_evaluate_with_empty_groups(self, func, expected):
        # 26208
        # test transform'ing empty groups
        # (not testing other agg fns, because they return
        # different index objects.
        df = pd.DataFrame({1: [], 2: []})
        g = df.groupby(1)
        result = getattr(g[2], func)(lambda x: x)
        tm.assert_series_equal(result, expected)

    def test_groupby_empty(self):
        # https://github.com/pandas-dev/pandas/issues/27190
        s = pd.Series([], name="name", dtype="float64")
        gr = s.groupby([])

        result = gr.mean()
        tm.assert_series_equal(result, s)

        # check group properties
        assert len(gr.grouper.groupings) == 1
        tm.assert_numpy_array_equal(
            gr.grouper.group_info[0], np.array([], dtype=np.dtype("int64"))
        )

        tm.assert_numpy_array_equal(
            gr.grouper.group_info[1], np.array([], dtype=np.dtype("int"))
        )

        assert gr.grouper.group_info[2] == 0

        # check name
        assert s.groupby(s).grouper.names == ["name"]

    def test_groupby_level_index_value_all_na(self):
        # issue 20519
        df = DataFrame(
            [["x", np.nan, 10], [None, np.nan, 20]], columns=["A", "B", "C"]
        ).set_index(["A", "B"])
        result = df.groupby(level=["A", "B"]).sum()
        expected = DataFrame(
            data=[],
            index=MultiIndex(
                levels=[Index(["x"], dtype="object"), Index([], dtype="float64")],
                codes=[[], []],
                names=["A", "B"],
            ),
            columns=["C"],
            dtype="int64",
        )
        tm.assert_frame_equal(result, expected)

    def test_groupby_multiindex_level_empty(self):
        # https://github.com/pandas-dev/pandas/issues/31670
        df = pd.DataFrame(
            [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"]
        )
        df = df.set_index(["id", "category"])
        empty = df[df.value < 0]
        result = empty.groupby("id").sum()
        expected = pd.DataFrame(
            dtype="float64", columns=["value"], index=pd.Int64Index([], name="id")
        )
        tm.assert_frame_equal(result, expected)
# get_group
# --------------------------------
class TestGetGroup:
    def test_get_group(self):
        # GH 5267
        # be datelike friendly
        df = DataFrame(
            {
                "DATE": pd.to_datetime(
                    [
                        "10-Oct-2013",
                        "10-Oct-2013",
                        "10-Oct-2013",
                        "11-Oct-2013",
                        "11-Oct-2013",
                        "11-Oct-2013",
                    ]
                ),
                "label": ["foo", "foo", "bar", "foo", "foo", "bar"],
                "VAL": [1, 2, 3, 4, 5, 6],
            }
        )

        g = df.groupby("DATE")
        key = list(g.groups)[0]
        result1 = g.get_group(key)
        # Timestamp, datetime and string keys must all resolve to the same
        # group
        result2 = g.get_group(Timestamp(key).to_pydatetime())
        result3 = g.get_group(str(Timestamp(key)))
        tm.assert_frame_equal(result1, result2)
        tm.assert_frame_equal(result1, result3)

        g = df.groupby(["DATE", "label"])

        key = list(g.groups)[0]
        result1 = g.get_group(key)
        result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1]))
        result3 = g.get_group((str(Timestamp(key[0])), key[1]))
        tm.assert_frame_equal(result1, result2)
        tm.assert_frame_equal(result1, result3)

        # must pass a same-length tuple with multiple keys
        msg = "must supply a tuple to get_group with multiple grouping keys"
        with pytest.raises(ValueError, match=msg):
            g.get_group("foo")
        with pytest.raises(ValueError, match=msg):
            g.get_group(("foo"))
        msg = "must supply a same-length tuple to get_group with multiple grouping keys"
        with pytest.raises(ValueError, match=msg):
            g.get_group(("foo", "bar", "baz"))
def test_get_group_empty_bins(self, observed):
d = pd.DataFrame([3, 1, 7, 6])
bins = [0, 5, 10, 15]
g = d.groupby(pd.cut(d[0], bins), observed=observed)
# TODO: should prob allow a str of Interval work as well
# IOW '(0, 5]'
result = g.get_group(pd.Interval(0, 5))
expected = DataFrame([3, 1], index=[0, 1])
tm.assert_frame_equal(result, expected)
msg = r"Interval\(10, 15, closed='right'\)"
with pytest.raises(KeyError, match=msg):
g.get_group(pd.Interval(10, 15))
def test_get_group_grouped_by_tuple(self):
# GH 8121
df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]], index=["ids"]).T
gr = df.groupby("ids")
expected = DataFrame({"ids": [(1,), (1,)]}, index=[0, 2])
result = gr.get_group((1,))
tm.assert_frame_equal(result, expected)
dt = pd.to_datetime(["2010-01-01", "2010-01-02", "2010-01-01", "2010-01-02"])
df = DataFrame({"ids": [(x,) for x in dt]})
gr = df.groupby("ids")
result = gr.get_group(("2010-01-01",))
expected = DataFrame({"ids": [(dt[0],), (dt[0],)]}, index=[0, 2])
tm.assert_frame_equal(result, expected)
def test_groupby_with_empty(self):
index = pd.DatetimeIndex(())
data = ()
series = pd.Series(data, index, dtype=object)
grouper = pd.Grouper(freq="D")
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
def test_groupby_with_single_column(self):
df = pd.DataFrame({"a": list("abssbab")})
tm.assert_frame_equal(df.groupby("a").get_group("a"), df.iloc[[0, 5]])
# GH 13530
exp = pd.DataFrame(index=pd.Index(["a", "b", "s"], name="a"))
tm.assert_frame_equal(df.groupby("a").count(), exp)
tm.assert_frame_equal(df.groupby("a").sum(), exp)
tm.assert_frame_equal(df.groupby("a").nth(1), exp)
def test_gb_key_len_equal_axis_len(self):
# GH16843
# test ensures that index and column keys are recognized correctly
# when number of keys equals axis length of groupby
df = pd.DataFrame(
[["foo", "bar", "B", 1], ["foo", "bar", "B", 2], ["foo", "baz", "C", 3]],
columns=["first", "second", "third", "one"],
)
df = df.set_index(["first", "second"])
df = df.groupby(["first", "second", "third"]).size()
assert df.loc[("foo", "bar", "B")] == 2
assert df.loc[("foo", "baz", "C")] == 1
# groups & iteration
# --------------------------------
class TestIteration:
    """Tests for the ``groups`` mapping and iteration over GroupBy objects."""
    def test_groups(self, df):
        """``groups`` is cached and maps each key to the matching row labels."""
        grouped = df.groupby(["A"])
        groups = grouped.groups
        assert groups is grouped.groups  # caching works
        for k, v in grouped.groups.items():
            assert (df.loc[v]["A"] == k).all()
        grouped = df.groupby(["A", "B"])
        groups = grouped.groups
        assert groups is grouped.groups  # caching works
        for k, v in grouped.groups.items():
            assert (df.loc[v]["A"] == k[0]).all()
            assert (df.loc[v]["B"] == k[1]).all()
    def test_grouping_is_iterable(self, tsframe):
        """Individual Grouping objects support iteration."""
        # this code path isn't used anywhere else
        # not sure it's useful
        grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])
        # test it works
        for g in grouped.grouper.groupings[0]:
            pass
    def test_multi_iter(self):
        """Iterating a Series grouped by two keys yields sorted (k1, k2) pairs."""
        s = Series(np.arange(6))
        k1 = np.array(["a", "a", "a", "b", "b", "b"])
        k2 = np.array(["1", "2", "1", "2", "1", "2"])
        grouped = s.groupby([k1, k2])
        iterated = list(grouped)
        expected = [
            ("a", "1", s[[0, 2]]),
            ("a", "2", s[[1]]),
            ("b", "1", s[[4]]),
            ("b", "2", s[[3, 5]]),
        ]
        for i, ((one, two), three) in enumerate(iterated):
            e1, e2, e3 = expected[i]
            assert e1 == one
            assert e2 == two
            tm.assert_series_equal(three, e3)
    def test_multi_iter_frame(self, three_group):
        """Frame iteration is sorted by key and skips empty groups."""
        k1 = np.array(["b", "b", "b", "a", "a", "a"])
        k2 = np.array(["1", "2", "1", "2", "1", "2"])
        df = DataFrame(
            {"v1": np.random.randn(6), "v2": np.random.randn(6), "k1": k1, "k2": k2},
            index=["one", "two", "three", "four", "five", "six"],
        )
        grouped = df.groupby(["k1", "k2"])
        # things get sorted!
        iterated = list(grouped)
        idx = df.index
        expected = [
            ("a", "1", df.loc[idx[[4]]]),
            ("a", "2", df.loc[idx[[3, 5]]]),
            ("b", "1", df.loc[idx[[0, 2]]]),
            ("b", "2", df.loc[idx[[1]]]),
        ]
        for i, ((one, two), three) in enumerate(iterated):
            e1, e2, e3 = expected[i]
            assert e1 == one
            assert e2 == two
            tm.assert_frame_equal(three, e3)
        # don't iterate through groups with no data
        df["k1"] = np.array(["b", "b", "b", "a", "a", "a"])
        df["k2"] = np.array(["1", "1", "1", "2", "2", "2"])
        grouped = df.groupby(["k1", "k2"])
        groups = {key: gp for key, gp in grouped}
        assert len(groups) == 2
        # axis = 1
        three_levels = three_group.groupby(["A", "B", "C"]).mean()
        grouped = three_levels.T.groupby(axis=1, level=(1, 2))
        for key, group in grouped:
            pass
    def test_dictify(self, df):
        """Smoke test: every GroupBy flavor can be materialized into a dict."""
        dict(iter(df.groupby("A")))
        dict(iter(df.groupby(["A", "B"])))
        dict(iter(df["C"].groupby(df["A"])))
        dict(iter(df["C"].groupby([df["A"], df["B"]])))
        dict(iter(df.groupby("A")["C"]))
        dict(iter(df.groupby(["A", "B"])["C"]))
    def test_groupby_with_small_elem(self):
        """Grouper(freq='M') + column key builds the right (period, label)
        groups for frames with 2-3 rows."""
        # GH 8542
        # length=2
        df = pd.DataFrame(
            {"event": ["start", "start"], "change": [1234, 5678]},
            index=pd.DatetimeIndex(["2014-09-10", "2013-10-10"]),
        )
        grouped = df.groupby([pd.Grouper(freq="M"), "event"])
        assert len(grouped.groups) == 2
        assert grouped.ngroups == 2
        assert (pd.Timestamp("2014-09-30"), "start") in grouped.groups
        assert (pd.Timestamp("2013-10-31"), "start") in grouped.groups
        res = grouped.get_group((pd.Timestamp("2014-09-30"), "start"))
        tm.assert_frame_equal(res, df.iloc[[0], :])
        res = grouped.get_group((pd.Timestamp("2013-10-31"), "start"))
        tm.assert_frame_equal(res, df.iloc[[1], :])
        # two rows fall into the same month: still only two groups
        df = pd.DataFrame(
            {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},
            index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-09-15"]),
        )
        grouped = df.groupby([pd.Grouper(freq="M"), "event"])
        assert len(grouped.groups) == 2
        assert grouped.ngroups == 2
        assert (pd.Timestamp("2014-09-30"), "start") in grouped.groups
        assert (pd.Timestamp("2013-10-31"), "start") in grouped.groups
        res = grouped.get_group((pd.Timestamp("2014-09-30"), "start"))
        tm.assert_frame_equal(res, df.iloc[[0, 2], :])
        res = grouped.get_group((pd.Timestamp("2013-10-31"), "start"))
        tm.assert_frame_equal(res, df.iloc[[1], :])
        # length=3
        df = pd.DataFrame(
            {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},
            index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-08-05"]),
        )
        grouped = df.groupby([pd.Grouper(freq="M"), "event"])
        assert len(grouped.groups) == 3
        assert grouped.ngroups == 3
        assert (pd.Timestamp("2014-09-30"), "start") in grouped.groups
        assert (pd.Timestamp("2013-10-31"), "start") in grouped.groups
        assert (pd.Timestamp("2014-08-31"), "start") in grouped.groups
        res = grouped.get_group((pd.Timestamp("2014-09-30"), "start"))
        tm.assert_frame_equal(res, df.iloc[[0], :])
        res = grouped.get_group((pd.Timestamp("2013-10-31"), "start"))
        tm.assert_frame_equal(res, df.iloc[[1], :])
        res = grouped.get_group((pd.Timestamp("2014-08-31"), "start"))
        tm.assert_frame_equal(res, df.iloc[[2], :])
    def test_grouping_string_repr(self):
        """A Grouping over a MultiIndex column reprs with the full tuple name."""
        # GH 13394
        mi = MultiIndex.from_arrays([list("AAB"), list("aba")])
        df = DataFrame([[1, 2, 3]], columns=mi)
        gr = df.groupby(df[("A", "a")])
        result = gr.grouper.groupings[0].__repr__()
        expected = "Grouping(('A', 'a'))"
        assert result == expected
| bsd-3-clause |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
# Feature dimensionality of the two toy datasets used throughout these tests.
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn returning Boston housing features/labels as constant tensors.

  Args:
    num_epochs: If set, limits how many epochs the features tensor may be
      drawn for (via `limit_epochs`); `None` means unlimited.

  Returns:
    `(features, labels)` tensors reshaped to `[-1, 13]` and `[-1, 1]`.
  """
  boston = base.load_boston()
  features = input_lib.limit_epochs(
      array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
  return features, labels
def boston_input_fn_with_queue(num_epochs=None):
  """Like `boston_input_fn`, but also registers a minimal dummy queue runner
  so tests can verify prediction works in the presence of queue runners."""
  features, labels = boston_input_fn(num_epochs=num_epochs)
  # Create a minimal queue runner.
  fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
  queue_runner = queue_runner_impl.QueueRunner(fake_queue,
                                               [constant_op.constant(0)])
  queue_runner_impl.add_queue_runner(queue_runner)
  return features, labels
def iris_input_fn():
  """Input fn returning iris features `[-1, 4]` and integer labels `[-1]`."""
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
  return features, labels
def iris_input_fn_labels_dict():
  """Same as `iris_input_fn`, but returns labels wrapped in a dict under the
  key 'labels' (exercises dict-valued label support)."""
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = {
      'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
  }
  return features, labels
def boston_eval_fn():
  """Eval input fn: the full Boston dataset concatenated with itself along
  axis 0, so the eval set has twice the original number of rows."""
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(
      constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(
      constant_op.constant(boston.target), [n_examples, 1])
  return array_ops.concat([features, features], 0), array_ops.concat(
      [labels, labels], 0)
def extract(data, key):
  """Return ``data[key]`` when *data* is a dict, otherwise *data* unchanged.

  Lets model fns accept either bare tensors or dict-wrapped tensors. Raises
  AssertionError if *data* is a dict that does not contain *key*.
  """
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Model fn: zero-init linear regression; learning rate read from `params`.

  Returns a `(prediction, loss, train_op)` tuple. Inputs may be dicts keyed
  by 'input'/'labels' (unwrapped via `extract`).
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss,
      variables.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Model fn: zero-init linear regression trained with Adagrad (lr=0.1).

  Returns a `(prediction, loss, train_op)` tuple. If `features` is still a
  dict after `extract`, it must contain exactly one tensor, which is used.
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  if isinstance(features, dict):
    # Unpack the single remaining entry; fails if there is more than one.
    (_, features), = features.items()
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  # Wrap in the structured ModelFnOps return type instead of a plain tuple.
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """Model fn without a `mode` arg: 3-class zero-init logistic regression.

  Labels are one-hot encoded to depth 3. Predictions are returned as a dict
  with 'class' (argmax over classes) and 'prob' (per-class outputs).
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
# Newline-separated vocabulary payloads; presumably written to temp files by
# vocabulary-related tests elsewhere in this file (unused in the code visible
# here) — TODO confirm against the full test module.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
  """Tests feeding arrays, dicts and input_fns into `Estimator` fit/eval/predict."""
  def testContinueTrainingDictionaryInput(self):
    """Re-creating an estimator with the same model_dir restores the model
    for evaluate/predict (dict-valued x and y)."""
    boston = base.load_boston()
    output_dir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=50)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    del est
    # Create another estimator object with the same output dir.
    est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
    # Check we can evaluate and predict.
    scores2 = est2.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    self.assertAllClose(scores2['MSE'], scores['MSE'])
    predictions = np.array(list(est2.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions,
                                              float64_target['labels'])
    self.assertAllClose(other_score, scores['MSE'])
  def testBostonAll(self):
    """End-to-end regression fit/score/predict via the SKCompat wrapper."""
    boston = base.load_boston()
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    float64_labels = boston.target.astype(np.float64)
    est.fit(x=boston.data, y=float64_labels, steps=100)
    scores = est.score(
        x=boston.data,
        y=float64_labels,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston.data)))
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(scores['MSE'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])
  def testBostonAllDictionaryInput(self):
    """Same as testBostonAll, but feeding x and y as dictionaries."""
    boston = base.load_boston()
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=100)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(other_score, scores['MSE'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)
  def testIrisAll(self):
    """Classification end-to-end on iris via SKCompat; checks that 'class'
    predictions equal the argmax of 'prob'."""
    iris = base.load_iris()
    est = estimator.SKCompat(
        estimator.Estimator(model_fn=logistic_model_no_mode_fn))
    est.fit(iris.data, iris.target, steps=100)
    scores = est.score(
        x=iris.data,
        y=iris.target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = est.predict(x=iris.data)
    predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
    self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
    self.assertAllClose(predictions['class'], predictions_class)
    self.assertAllClose(
        predictions['class'], np.argmax(
            predictions['prob'], axis=1))
    other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
    self.assertAllClose(scores['accuracy'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])
  def testIrisAllDictionaryInput(self):
    """Same as testIrisAll, but with dict x/y and a plain Estimator (predict
    yields one dict per example rather than batched arrays)."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    iris_data = {'input': iris.data}
    iris_target = {'labels': iris.target}
    est.fit(iris_data, iris_target, steps=100)
    scores = est.evaluate(
        x=iris_data,
        y=iris_target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = list(est.predict(x=iris_data))
    predictions_class = list(est.predict(x=iris_data, outputs=['class']))
    self.assertEqual(len(predictions), iris.target.shape[0])
    classes_batch = np.array([p['class'] for p in predictions])
    self.assertAllClose(classes_batch,
                        np.array([p['class'] for p in predictions_class]))
    self.assertAllClose(
        classes_batch,
        np.argmax(
            np.array([p['prob'] for p in predictions]), axis=1))
    other_score = _sklearn.accuracy_score(iris.target, classes_batch)
    self.assertAllClose(other_score, scores['accuracy'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)
  def testIrisInputFn(self):
    """fit/evaluate accept an input_fn; predict yields one row per example."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn, steps=100)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1)
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])
  def testIrisInputFnLabelsDict(self):
    """input_fn may return dict labels; MetricSpec selects keys to compare."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
    _ = est.evaluate(
        input_fn=iris_input_fn_labels_dict,
        steps=1,
        metrics={
            'accuracy':
                metric_spec.MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='class',
                    label_key='labels')
        })
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])
  def testTrainInputFn(self):
    """Smoke test: fit with one input_fn and evaluate with another."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_eval_fn, steps=1)
  def testPredictInputFn(self):
    """predict(input_fn=...) over one epoch yields one output per example."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    input_fn = functools.partial(boston_input_fn, num_epochs=1)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
  def testPredictInputFnWithQueue(self):
    """Queue runners do not interfere; two epochs yield twice the outputs."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0] * 2)
  def testPredictConstInputFn(self):
    """predict works with an input_fn closing over constant tensors."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    def input_fn():
      features = array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
      labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
      return features, labels
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  test.main()
| mit |
mhdella/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
    """Dimensionality reduction using truncated SVD (aka LSA).

    This transformer performs linear dimensionality reduction by means of
    truncated singular value decomposition (SVD). It is very similar to PCA,
    but operates on sample vectors directly, instead of on a covariance matrix.
    This means it can work with scipy.sparse matrices efficiently.

    In particular, truncated SVD works on term count/tf-idf matrices as
    returned by the vectorizers in sklearn.feature_extraction.text. In that
    context, it is known as latent semantic analysis (LSA).

    This estimator supports two algorithms: a fast randomized SVD solver, and
    a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
    (X.T * X), whichever is more efficient.

    Read more in the :ref:`User Guide <LSA>`.

    Parameters
    ----------
    n_components : int, default = 2
        Desired dimensionality of output data.
        Must be strictly less than the number of features.
        The default value is useful for visualisation. For LSA, a value of
        100 is recommended.

    algorithm : string, default = "randomized"
        SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
        (scipy.sparse.linalg.svds), or "randomized" for the randomized
        algorithm due to Halko (2009).

    n_iter : int, optional
        Number of iterations for randomized SVD solver. Not used by ARPACK.

    random_state : int or RandomState, optional
        (Seed for) pseudo-random number generator. If not given, the
        numpy.random singleton is used.

    tol : float, optional
        Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
        SVD solver.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)

    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.

    explained_variance_ : array, [n_components]
        The variance of the training samples transformed by a projection to
        each component.

    Examples
    --------
    >>> from sklearn.decomposition import TruncatedSVD
    >>> from sklearn.random_projection import sparse_random_matrix
    >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    >>> svd = TruncatedSVD(n_components=5, random_state=42)
    >>> svd.fit(X)  # doctest: +NORMALIZE_WHITESPACE
    TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
            random_state=42, tol=0.0)
    >>> print(svd.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
    >>> print(svd.explained_variance_ratio_.sum())  # doctest: +ELLIPSIS
    0.27930...

    See also
    --------
    PCA
    RandomizedPCA

    References
    ----------
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061

    Notes
    -----
    SVD suffers from a problem called "sign indeterminacy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.
    """

    def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
                 random_state=None, tol=0.):
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol

    def fit(self, X, y=None):
        """Fit LSI model on training data X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : object
            Returns the transformer object.
        """
        self.fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit LSI model to X and perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = as_float_array(X, copy=False)
        random_state = check_random_state(self.random_state)

        # If sparse and not csr or csc, convert to csr
        if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
            X = X.tocsr()

        if self.algorithm == "arpack":
            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            U, VT = svd_flip(U[:, ::-1], VT[::-1])

        elif self.algorithm == "randomized":
            k = self.n_components
            n_features = X.shape[1]
            if k >= n_features:
                raise ValueError("n_components must be < n_features;"
                                 " got %d >= %d" % (k, n_features))
            U, Sigma, VT = randomized_svd(X, self.n_components,
                                          n_iter=self.n_iter,
                                          random_state=random_state)
        else:
            raise ValueError("unknown algorithm %r" % self.algorithm)

        self.components_ = VT

        # Calculate explained variance & explained variance ratio.
        # Broadcasting ``U * Sigma`` scales each left-singular column by its
        # singular value; equivalent to ``np.dot(U, np.diag(Sigma))`` without
        # materializing the k x k diagonal matrix.
        X_transformed = U * Sigma
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        return X_transformed

    def transform(self, X):
        """Perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse='csr')
        return safe_sparse_dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform X back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data.

        Returns
        -------
        X_original : array, shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/linear_model/randomized_l1.py | 9 | 24350 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
                    n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
                    random_state=None, sample_fraction=.75, **params):
    """Average the selection masks of `estimator_func` over random resamplings.

    Each of the `n_resampling` rounds draws a random subsample of the rows
    (kept with probability `sample_fraction`) and random 0/`scaling` penalty
    weights for the columns, calls `estimator_func`, and accumulates the
    returned boolean selection mask. The result is the per-feature selection
    frequency in [0, 1].
    """
    random_state = check_random_state(random_state)
    # We are generating 1 - weights, and not weights
    n_samples, n_features = X.shape

    if not (0 < scaling < 1):
        raise ValueError(
            "'scaling' should be between 0 and 1. Got %r instead." % scaling)

    scaling = 1. - scaling
    scores_ = 0.0
    for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
                               pre_dispatch=pre_dispatch)(
            delayed(estimator_func)(
                # randint(0, 2) draws from {0, 1}; this replaces the
                # deprecated random_integers(0, 1), which delegated to
                # randint(low, high + 1) and so produced the same stream.
                X, y, weights=scaling * random_state.randint(
                    0, 2, size=(n_features,)),
                mask=(random_state.rand(n_samples) < sample_fraction),
                verbose=max(0, verbose - 1),
                **params)
            for _ in range(n_resampling)):
        scores_ += active_set

    scores_ /= n_resampling
    return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
                                                   TransformerMixin)):
    """Base class to implement randomized linear models for feature selection

    This implements the strategy by Meinshausen and Buhlman:
    stability selection with randomized sampling, and random re-weighting of
    the penalty.
    """

    @abstractmethod
    def __init__(self):
        pass

    # Exposed as a static method so subclasses/tests can override it.
    _preprocess_data = staticmethod(_preprocess_data)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
                         ensure_min_samples=2, estimator=self)
        X = as_float_array(X, copy=False)
        n_samples, n_features = X.shape

        X, y, X_offset, y_offset, X_scale = \
            self._preprocess_data(X, y, self.fit_intercept, self.normalize)

        estimator_func, params = self._make_estimator_and_params(X, y)
        memory = self.memory
        if isinstance(memory, six.string_types):
            memory = Memory(cachedir=memory)

        # Cache the resampling loop on its data arguments only; verbosity
        # and parallelism settings must not invalidate the cache.
        scores_ = memory.cache(
            _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
        )(
            estimator_func, X, y,
            scaling=self.scaling, n_resampling=self.n_resampling,
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=self.pre_dispatch, random_state=self.random_state,
            sample_fraction=self.sample_fraction, **params)

        if scores_.ndim == 1:
            scores_ = scores_[:, np.newaxis]
        self.all_scores_ = scores_
        # Best score across the regularization path for each feature.
        self.scores_ = np.max(self.all_scores_, axis=1)
        return self

    def _make_estimator_and_params(self, X, y):
        """Return the parameters passed to the estimator"""
        raise NotImplementedError

    def get_support(self, indices=False):
        """Return a mask, or list, of the features/indices selected."""
        check_is_fitted(self, 'scores_')

        mask = self.scores_ > self.selection_threshold
        return mask if not indices else np.where(mask)[0]

    # XXX: the two function below are copy/pasted from feature_selection,
    # Should we add an intermediate base class?
    def transform(self, X):
        """Transform a new matrix using the selected features"""
        mask = self.get_support()
        X = check_array(X)
        if len(mask) != X.shape[1]:
            raise ValueError("X has a different shape than during fitting.")
        # X was already validated above; no need to run check_array twice.
        return X[:, safe_mask(X, mask)]

    def inverse_transform(self, X):
        """Project X back to the original feature space.

        Columns corresponding to unselected features are filled with zeros.
        """
        support = self.get_support()
        if X.ndim == 1:
            X = X[None, :]
        Xt = np.zeros((X.shape[0], support.size))
        Xt[:, support] = X
        return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
                      precompute=False, eps=np.finfo(float).eps,
                      max_iter=500):
    """One stability-selection round: Lasso on a reweighted row subsample.

    Fits a lars_path Lasso on the rows selected by `mask`, with each column
    scaled by ``1 - weights``, and returns a boolean array marking which
    coefficients are nonzero at the requested alpha value(s).

    Note: uses builtin ``float``/``bool`` dtypes — the ``np.float``/``np.bool``
    aliases were removed in NumPy 1.24 (identical semantics).
    """
    X = X[safe_mask(X, mask)]
    y = y[mask]

    # Center X and y to avoid fit the intercept
    X -= X.mean(axis=0)
    y -= y.mean()

    alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))

    X = (1 - weights) * X
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas_, _, coef_ = lars_path(X, y,
                                      Gram=precompute, copy_X=False,
                                      copy_Gram=False, alpha_min=np.min(alpha),
                                      method='lasso', verbose=verbose,
                                      max_iter=max_iter, eps=eps)

    if len(alpha) > 1:
        if len(alphas_) > 1:  # np.min(alpha) < alpha_min
            interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
                                    bounds_error=False, fill_value=0.)
            scores = (interpolator(alpha) != 0.0)
        else:
            scores = np.zeros((X.shape[1], len(alpha)), dtype=bool)
    else:
        scores = coef_[:, -1] != 0.0
    return scores
class RandomizedLasso(BaseRandomizedLinearModel):
    """Randomized Lasso.
    Randomized Lasso works by resampling the train data and computing
    a Lasso on each resampling. In short, the features selected more
    often are good features. It is also known as stability selection.
    Read more in the :ref:`User Guide <randomized_l1>`.
    Parameters
    ----------
    alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha parameter in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article which is scaling.
    scaling : float, optional
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.
    sample_fraction : float, optional
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.
    n_resampling : int, optional
        Number of randomized models.
    selection_threshold: float, optional
        The score above which features should be selected.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    precompute : True | False | 'auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform in the Lars algorithm.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the 'tol' parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'
    memory : Instance of joblib.Memory or string
        Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    Attributes
    ----------
    scores_ : array, shape = [n_features]
        Feature scores between 0 and 1.
    all_scores_ : array, shape = [n_features, n_reg_parameter]
        Feature scores between 0 and 1 for all values of the regularization \
        parameter. The reference article suggests ``scores_`` is the max of \
        ``all_scores_``.
    Examples
    --------
    >>> from sklearn.linear_model import RandomizedLasso
    >>> randomized_lasso = RandomizedLasso()
    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x
    See also
    --------
    RandomizedLogisticRegression, LogisticRegression
    """
    def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
                 n_resampling=200, selection_threshold=.25,
                 fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto',
                 max_iter=500,
                 # np.float was removed from NumPy; float64 eps is the
                 # identical default value.
                 eps=np.finfo(np.float64).eps, random_state=None,
                 n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        self.alpha = alpha
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.eps = eps
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory
    def _make_estimator_and_params(self, X, y):
        """Return the randomized-fit callable and its parameters.

        When ``alpha`` is 'aic' or 'bic', a LassoLarsIC model is first fit
        on the full data to pick a single alpha by information criterion.
        """
        assert self.precompute in (True, False, None, 'auto')
        alpha = self.alpha
        if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
            model = LassoLarsIC(precompute=self.precompute,
                                criterion=self.alpha,
                                max_iter=self.max_iter,
                                eps=self.eps)
            model.fit(X, y)
            # Record the alpha chosen by the information criterion.
            self.alpha_ = alpha = model.alpha_
        return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
                                       eps=self.eps,
                                       precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
                         fit_intercept=True, tol=1e-3):
    """Inner loop of RandomizedLogisticRegression.

    Fits an L1-penalized logistic regression on the rows selected by
    ``mask`` with features rescaled by ``(1 - weights)``, for each value
    of ``C``, and reports which coefficients are non-zero.

    Returns
    -------
    scores : boolean array, shape = [n_features, len(C)]
        True where the corresponding coefficient is non-zero.
    """
    X = X[safe_mask(X, mask)]
    y = y[mask]
    if issparse(X):
        # Sparse input: apply the rescaling as a diagonal-matrix product
        # so the data stays sparse.
        size = len(weights)
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X = X * weight_dia
    else:
        X *= (1 - weights)
    C = np.atleast_1d(np.asarray(C, dtype=np.float64))
    # np.bool was removed from NumPy; the builtin bool is equivalent.
    scores = np.zeros((X.shape[1], len(C)), dtype=bool)
    for this_C, this_scores in zip(C, scores.T):
        # XXX : would be great to do it with a warm_start ...
        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
                                 fit_intercept=fit_intercept)
        clf.fit(X, y)
        # A coefficient counts as selected when it is meaningfully non-zero
        # (np.float was removed from NumPy; float64 eps is the same value).
        this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(np.float64).eps, axis=0)
    return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
    """Randomized Logistic Regression
    Randomized Regression works by resampling the train data and computing
    a LogisticRegression on each resampling. In short, the features selected
    more often are good features. It is also known as stability selection.
    Read more in the :ref:`User Guide <randomized_l1>`.
    Parameters
    ----------
    C : float, optional, default=1
        The regularization parameter C in the LogisticRegression.
    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.
    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.
    n_resampling : int, optional, default=200
        Number of randomized models.
    selection_threshold : float, optional, default=0.25
        The score above which features should be selected.
    fit_intercept : boolean, optional, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    tol : float, optional, default=1e-3
        tolerance for stopping criteria of LogisticRegression
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'
    memory : Instance of joblib.Memory or string
        Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    Attributes
    ----------
    scores_ : array, shape = [n_features]
        Feature scores between 0 and 1.
    all_scores_ : array, shape = [n_features, n_reg_parameter]
        Feature scores between 0 and 1 for all values of the regularization \
        parameter. The reference article suggests ``scores_`` is the max \
        of ``all_scores_``.
    Examples
    --------
    >>> from sklearn.linear_model import RandomizedLogisticRegression
    >>> randomized_logistic = RandomizedLogisticRegression()
    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x
    See also
    --------
    RandomizedLasso, Lasso, ElasticNet
    """
    def __init__(self, C=1, scaling=.5, sample_fraction=.75,
                 n_resampling=200,
                 selection_threshold=.25, tol=1e-3,
                 fit_intercept=True, verbose=False,
                 normalize=True,
                 random_state=None,
                 n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        self.C = C
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory
    def _make_estimator_and_params(self, X, y):
        """Return the randomized-fit callable and its parameters.

        Delegates each resampled fit to the module-level
        ``_randomized_logistic`` helper.
        """
        params = dict(C=self.C, tol=self.tol,
                      fit_intercept=self.fit_intercept)
        return _randomized_logistic, params
    def _preprocess_data(self, X, y, fit_intercept, normalize=False):
        """Center the data in X but not in y.

        ``y`` holds class labels in the classification setting, so only X
        is centered/normalized; the heavy lifting is delegated to the
        module-level ``_preprocess_data`` function.
        """
        X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                      normalize=normalize)
        return X, y, X_offset, y, X_scale
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
    """Inner loop of lasso_stability_path: one randomized Lasso path.

    Fits a Lars/Lasso path on a row subsample with randomly rescaled
    features and returns the (normalized, ascending) alphas with the
    corresponding coefficient path.
    """
    # Randomly rescale the features (stability-selection perturbation).
    X = X * weights[np.newaxis, :]
    X = X[safe_mask(X, mask), :]
    y = y[mask]
    # Smallest alpha for which all coefficients are zero.
    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    alpha_min = eps * alpha_max  # set for early stopping in path
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
                                     alpha_min=alpha_min)
    # Scale alpha by alpha_max
    alphas /= alphas[0]
    # Sort alphas in ascending order
    alphas = alphas[::-1]
    coefs = coefs[:, ::-1]
    # Get rid of the alphas that are too small
    mask = alphas >= eps
    # We also want to keep the first one: it should be close to the OLS
    # solution
    mask[0] = True
    alphas = alphas[mask]
    coefs = coefs[:, mask]
    return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
                         n_resampling=200, n_grid=100,
                         sample_fraction=0.75,
                         # np.float was removed from NumPy; float64 eps is
                         # the identical default value.
                         eps=4 * np.finfo(np.float64).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates
    Read more in the :ref:`User Guide <randomized_l1>`.
    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        training data.
    y : array-like, shape = [n_samples]
        target values.
    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.
    random_state : integer or numpy.random.RandomState, optional
        The generator used to randomize the design.
    n_resampling : int, optional, default=200
        Number of randomized models.
    n_grid : int, optional, default=100
        Number of grid points. The path is linearly reinterpolated
        on a grid between 0 and 1 before computing the scores.
    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.
    eps : float, optional
        Smallest value of alpha / alpha_max considered
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs
    verbose : boolean or integer, optional
        Sets the verbosity amount
    Returns
    -------
    alphas_grid : array, shape ~ [n_grid]
        The grid points between 0 and 1: alpha/alpha_max
    scores_path : array, shape = [n_features, n_grid]
        The scores for each feature along the path.
    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    """
    rng = check_random_state(random_state)
    if not (0 < scaling < 1):
        raise ValueError("Parameter 'scaling' should be between 0 and 1."
                         " Got %r instead." % scaling)
    n_samples, n_features = X.shape
    paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_lasso_stability_path)(
            X, y, mask=rng.rand(n_samples) < sample_fraction,
            # randint(0, 2) draws uniformly from {0, 1}; it replaces the
            # removed RandomState.random_integers(0, 1) with identical
            # behavior.
            weights=1. - scaling * rng.randint(0, 2,
                                               size=(n_features,)),
            eps=eps)
        for k in range(n_resampling))
    all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
    # Take approximately n_grid values by striding over the sorted alphas.
    stride = int(max(1, int(len(all_alphas) / float(n_grid))))
    all_alphas = all_alphas[::stride]
    # Always evaluate at alpha/alpha_max == 1 (all-zero end of the path).
    if not all_alphas[-1] == 1:
        all_alphas.append(1.)
    all_alphas = np.array(all_alphas)
    scores_path = np.zeros((n_features, len(all_alphas)))
    for alphas, coefs in paths:
        # Pad each resampled path so it spans the whole [0, 1] grid.
        if alphas[0] != 0:
            alphas = np.r_[0, alphas]
            coefs = np.c_[np.ones((n_features, 1)), coefs]
        if alphas[-1] != all_alphas[-1]:
            alphas = np.r_[alphas, all_alphas[-1]]
            coefs = np.c_[coefs, np.zeros((n_features, 1))]
        # Count, at each grid alpha, whether the (nearest-interpolated)
        # coefficient is non-zero in this resampled path.
        scores_path += (interp1d(alphas, coefs,
                                 kind='nearest', bounds_error=False,
                                 fill_value=0, axis=-1)(all_alphas) != 0)
    # Selection frequency over all resamplings.
    scores_path /= n_resampling
    return all_alphas, scores_path
| bsd-3-clause |
mojoboss/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data: three Gaussian blobs with known ground-truth labels.
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
                            random_state=0)
##############################################################################
# Compute Affinity Propagation
# 'preference' controls how many points become exemplars; a lower
# (more negative) value yields fewer clusters.
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
# Supervised clustering metrics (possible here because labels_true is known).
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(labels_true, labels))
# Unsupervised metric: silhouette uses only the data and the clustering.
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result: each cluster in its own color, exemplar highlighted, and a
# line from the exemplar to every member.
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    class_members = labels == k
    cluster_center = X[cluster_centers_indices[k]]
    plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
    for x in X[class_members]:
        plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
bospetersen/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weightsGBM.py | 1 | 6149 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
import copy
def weights_check(ip,port):
    """Check that H2O GBM handles observation weights consistently.

    Verifies three equivalences on the cars_20mpg dataset (note: this file
    is Python 2 code — print statements):
    uniform weights == no weights, zero weights == dropped rows, and
    doubled weights == duplicated rows.
    """
    def check_same(data1, data2, min_rows_scale):
        # Fit regression / binomial / multinomial GBMs on the unweighted
        # frame (data1) and the weighted frame (data2), then assert the
        # fit statistics agree within tolerance.
        gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
                                  y="economy",
                                  training_frame=data1,
                                  min_rows=5,
                                  ntrees=5,
                                  max_depth=5)
        gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
                                  y=data2["economy"],
                                  min_rows=5*min_rows_scale,
                                  weights_column=data2["weights"],
                                  ntrees=5,
                                  max_depth=5)
        gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
                                y=data1["economy_20mpg"],
                                min_rows=5,
                                distribution="bernoulli",
                                ntrees=5,
                                max_depth=5)
        gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
                                y=data2["economy_20mpg"],
                                weights_column="weights",
                                training_frame=data2,
                                min_rows=5*min_rows_scale,
                                distribution="bernoulli",
                                ntrees=5,
                                max_depth=5)
        gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
                                   y=data1["cylinders"],
                                   min_rows=5,
                                   distribution="multinomial",
                                   ntrees=5,
                                   max_depth=5)
        gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
                                   y=data2["cylinders"],
                                   weights_column="weights",
                                   training_frame=data2,
                                   min_rows=5*min_rows_scale,
                                   distribution="multinomial",
                                   ntrees=5,
                                   max_depth=5)
        reg1_mse = gbm1_regression.mse()
        reg2_mse = gbm2_regression.mse()
        bin1_auc = gbm1_binomial.auc()
        bin2_auc = gbm2_binomial.auc()
        mul1_mse = gbm1_multinomial.mse()
        mul2_mse = gbm2_multinomial.mse()
        print "MSE (regresson)   no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse)
        print "AUC (binomial)    no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc)
        print "MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse)
        # Relative tolerances: MSE compared tightly, AUC a bit looser.
        assert abs(reg1_mse - reg2_mse) < 1e-6 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
        assert abs(bin1_auc - bin2_auc) < 3e-4 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
        assert abs(mul1_mse - mul1_mse) < 1e-6 * mul1_mse, "Expected auc's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
    h2o_cars_data = h2o.import_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
    h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
    h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
    # uniform weights same as no weights
    random.seed(2222)
    weight = random.randint(1,10)
    uniform_weights = [[weight] for r in range(406)]
    h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
    h2o_uniform_weights.setNames(["weights"])
    h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
    print "Checking that using uniform weights is equivalent to no weights:"
    print
    check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
    # zero weights same as removed observations
    zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
    h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
    h2o_zero_weights.setNames(["weights"])
    h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
    h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
    print "Checking that using some zero weights is equivalent to removing those observations:"
    print
    check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
    # doubled weights same as doubled observations
    doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
    h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
    h2o_doubled_weights.setNames(["weights"])
    h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
    # Build the duplicated-row frame client-side, then re-upload.
    doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
    colnames = doubled_data.pop(0)
    for idx, w in enumerate(doubled_weights):
        if w[0] == 2: doubled_data.append(doubled_data[idx])
    h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
    h2o_data_doubled.setNames(colnames)
    h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
    h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
    h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
    h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
    print "Checking that doubling some weights is equivalent to doubling those observations:"
    print
    check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
    # TODO: random weights
    # TODO: all zero weights???
    # TODO: negative weights???
if __name__ == "__main__":
    tests.run_test(sys.argv, weights_check)
| apache-2.0 |
zorojean/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Modified Huber loss as a function of the margin y_true * y_pred:
    linear (-4z) below -1, quadratic (1 - z)^2 on [-1, 1), zero at and
    above 1 (confidently correct predictions)."""
    margin = y_true * y_pred
    # Start from the linear branch, then overwrite the other regions
    # (order matters: the z >= 1 assignment must come last).
    loss = -4 * margin
    in_quadratic = margin >= -1
    loss[in_quadratic] = (1 - margin[in_quadratic]) ** 2
    loss[margin >= 1.] = 0
    return loss
# Evaluate every loss on a common grid of decision-function values.
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
# Zero-one loss drawn explicitly as a step function.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
    """Feature ranking with recursive feature elimination.
    Given an external estimator that assigns weights to features (e.g., the
    coefficients of a linear model), the goal of recursive feature elimination
    (RFE) is to select features by recursively considering smaller and smaller
    sets of features. First, the estimator is trained on the initial set of
    features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set features.
    That procedure is recursively repeated on the pruned set until the desired
    number of features to select is eventually reached.
    Read more in the :ref:`User Guide <rfe>`.
    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.
    n_features_to_select : int or None (default=None)
        The number of features to select. If `None`, half of the features
        are selected.
    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.
    estimator_params : dict
        Parameters for the external estimator.
        This attribute is deprecated as of version 0.16 and will be removed in
        0.18. Use estimator initialisation or set_params method instead.
    verbose : int, default=0
        Controls verbosity of output.
    Attributes
    ----------
    n_features_ : int
        The number of selected features.
    support_ : array of shape [n_features]
        The mask of selected features.
    ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.
    estimator_ : object
        The external estimator fit on the reduced dataset.
    Examples
    --------
    The following example shows how to retrieve the 5 right informative
    features in the Friedman #1 dataset.
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFE
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFE(estimator, 5, step=1)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, n_features_to_select=None, step=1,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.estimator_params = estimator_params
        self.verbose = verbose
    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator's type (classifier/regressor).
        return self.estimator._estimator_type
    def fit(self, X, y):
        """Fit the RFE model and then the underlying estimator on the selected
        features.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The training input samples.
        y : array-like, shape = [n_samples]
            The target values.
        """
        return self._fit(X, y)
    def _fit(self, X, y, step_score=None):
        # step_score, when given, records a score at each elimination step
        # (used by RFECV); it must evaluate with the not-yet-eliminated
        # feature set.
        X, y = check_X_y(X, y, "csc")
        # Initialization
        n_features = X.shape[1]
        if self.n_features_to_select is None:
            # Use floor division: plain '/' yields a float under Python 3.
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select
        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")
        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
        # np.bool / np.int were removed from NumPy; the builtins are
        # equivalent here.
        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)
        if step_score:
            self.scores_ = []
        # Elimination
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]
            # Rank the remaining features
            estimator = clone(self.estimator)
            if self.estimator_params:
                estimator.set_params(**self.estimator_params)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))
            estimator.fit(X[:, features], y)
            # Get coefs
            if hasattr(estimator, 'coef_'):
                coefs = estimator.coef_
            elif hasattr(estimator, 'feature_importances_'):
                coefs = estimator.feature_importances_
            else:
                raise RuntimeError('The classifier does not expose '
                                   '"coef_" or "feature_importances_" '
                                   'attributes')
            # Get ranks (ascending importance: worst features come first)
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(coefs))
            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)
            # Eliminate the worst features, but never drop below the target.
            threshold = min(step, np.sum(support_) - n_features_to_select)
            # Compute step score on the previous selection iteration
            # because 'estimator' must use features
            # that have not been eliminated yet
            if step_score:
                self.scores_.append(step_score(estimator, features))
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1
        # Set final attributes
        features = np.arange(n_features)[support_]
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(X[:, features], y)
        # Compute step score when only n_features_to_select features left
        if step_score:
            self.scores_.append(step_score(self.estimator_, features))
        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self
    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Reduce X to the selected features and then predict using the
        underlying estimator.
        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        return self.estimator_.predict(self.transform(X))
    @if_delegate_has_method(delegate='estimator')
    def score(self, X, y):
        """Reduce X to the selected features and then return the score of the
        underlying estimator.
        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.
        y : array of shape [n_samples]
            The target values.
        """
        return self.estimator_.score(self.transform(X), y)
    def _get_support_mask(self):
        # SelectorMixin hook: boolean mask of the selected features.
        return self.support_
    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Reduce X to the selected features, then delegate to the estimator."""
        return self.estimator_.decision_function(self.transform(X))
    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Reduce X to the selected features, then delegate to the estimator."""
        return self.estimator_.predict_proba(self.transform(X))
    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Reduce X to the selected features, then delegate to the estimator."""
        return self.estimator_.predict_log_proba(self.transform(X))
# RFECV = RFE plus cross-validation: for every fold an RFE is run down to a
# single feature, the per-step CV scores are summed across folds, and the
# feature count with the best summed score is refit on the full data (fit()).
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the a-priori not known 5
informative features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
# Parameters are stored untouched; validation and the deprecation
# warning for 'estimator_params' happen in fit() (scikit-learn convention).
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
# Eliminate all the way down to a single feature so that every subset
# size gets a CV score.
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Each fold runs a full RFE; the step_score callback evaluates the
# current estimator on the held-out split at every elimination step.
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
# Reverse so that index 0 corresponds to the smallest feature subset.
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
# Sum the per-step scores over all CV folds.
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
# Translate the best score index back into a number of features to keep.
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
# Fit a fresh clone on the selected features for downstream prediction.
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
luminescence/PolyLibScan | Analysis/plotting.py | 1 | 19133 | from matplotlib.artist import setp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import numba as nb
import pymc as mc
import numerics as num_
class Project(object):
def scatter_plot(self, subset=None, with_errors=False, with_labels=False, with_crossvalidation=False,
confidence_interval=0.95, ax=None, save_path=None, min_dist_to_ac=10,
ignore_experiment=False):
'''create a scatter plot with the probability
of binding (x-axis) and the mean strength of binding
at the active site.
input:
ax: pyplot axis
with_errors: bool
with_labels: bool
with_crossvalidation: bool
confidence_interval: float
ax: pyplot subplot object
save_path: [string]
min_dist_to_ac: float
output:
none
'''
if not ax:
fig, ax = plt.subplots(figsize=(18,12))
ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
self.jobs[0].meta['protein'].upper()), size=20)
# Per-polymer aggregate statistics (means, error bounds, colors,
# model predictions) prepared by the project.
results = self._scatter_data(subset=subset,
with_errors=with_errors,
with_labels=with_labels,
with_crossvalidation=with_crossvalidation,
confidence_interval=confidence_interval,
min_dist_to_ac=min_dist_to_ac,
ignore_experiment=ignore_experiment)
# Largest x-error widens the axis limit below so error bars stay visible.
if with_errors:
error = results['dist_max_error'].max()
else:
error = 0.0
if self.experimental_data is not None:
# Color-code points by experimental inhibition status.
results.plot(kind='scatter', x='dist_mean', y='energy_mean', alpha=0.7,
ax=ax, c=results.dropna()['color_by_inhibition'], s=100)
if with_errors:
ax.errorbar(results['dist_mean'] ,results['energy_mean'],
xerr=[results['dist_min_error'], results['dist_max_error']],
yerr=[results['energy_min_error'], results['energy_max_error']],
capsize=6, fmt=' ', color='grey', zorder=-1)
legend_items = [mpatches.Patch(color='red', label='not inhibiting')]
legend_items.append(mpatches.Patch(color='blue', label='inhibiting'))
if with_crossvalidation:
# 'b' (blue) encodes the inhibiting class in the color column,
# matching the legend entries above.
classification = results['color_by_inhibition'].apply(lambda x:x=='b')
roc_auc_score = self._roc_auc(classification, results['probabilities'])
kappa = self._kappa(classification, results['model_predictions'])
# plotting black dot on false predictions
results[~results['true_predictions']].plot(kind='scatter', x='dist_mean',
y='energy_mean', ax=ax, c='black', s=40)
legend_items.append(mpatches.Patch(color='black', label='ROC-AUC: %.2f' % roc_auc_score))
legend_items.append(mpatches.Patch(color='black', label='kappa : %.2f' % kappa))
ax.legend(handles=legend_items, fontsize=20, loc='best')
else:
results.plot(kind='scatter', x='dist_mean', y='energy_mean', alpha=0.7,
ax=ax, s=100)
if with_labels:
self._annotate(ax, results, 'dist_mean', 'energy_mean')
ax.tick_params(axis='both', which='major', labelsize=15)
ax.set_ylabel('Energy', size=25)
ax.set_xlabel(r'Binding probability within $%.2f\AA$ to active site' % round(min_dist_to_ac,1), size=25)
ax.set_xlim([-0.2*results['dist_mean'].max(), 1.2*results['dist_mean'].max()+error])
if save_path:
plt.savefig(save_path)
def _annotate(self, ax, df, x_name, y_name):
    """Label every scatter point with its DataFrame index value."""
    for point_label, row in df.iterrows():
        position = (row[x_name], row[y_name])
        ax.annotate(point_label, position,
                    xytext=(5,-10), textcoords='offset points',
                    family='sans-serif', fontsize=12, color='darkslategrey')
def histogramm(self, min_dist_to_ac=5, ax=None, save_path=None):
# Histogram of mean binding energies near the active site, split by
# experimental inhibition status when measurements are available.
# NOTE(review): method name 'histogramm' (double 'm') is part of the
# public interface and kept as-is.
if not ax:
fig, ax = plt.subplots(figsize=(18,12))
ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
self.jobs[0].meta['protein']), size=20)
near_active_site = self.endstate_matrix.stack(0).loc[self.endstate_matrix.stack(0)['Distance'] < min_dist_to_ac, :].unstack()['Energy']
if self.experimental_data is not None:
# Columns with NaN experimental values are treated as not inhibiting.
a = near_active_site.loc[:, self.experimental_data[near_active_site.columns].isnull()].mean()
# Normalize bar heights to fractions of the group size.
weights1 = np.ones_like(a)/len(a)
a.hist(ax=ax, weights=weights1, label='not inhibiting', color='red')
b = near_active_site.loc[:, self.experimental_data[near_active_site.columns].isnull()==False].mean()
weights2 = np.ones_like(b)/len(b)
b.hist(ax=ax, weights=weights2, label='inhibiting', alpha=0.7, color='blue')
ax.legend(loc='upper right', fontsize=22)
else:
a = near_active_site.mean()
a.hist(ax=ax)
ax.set_xlabel('Energy', size=25)
ax.set_ylabel('# of copolymer-types', size=25)
if save_path:
plt.savefig(save_path)
def multibox_plot(self, experimental_data=None, ax=None, save_path=None):
    """Box plot of binding energies per copolymer, colored by experimental
    inhibition status (red = not inhibiting, blue = inhibiting).

    Parameters:
        experimental_data: optional mapping protein-id -> experimental series;
            when None, self.experimental_data is used.
        ax: matplotlib axes to draw on; a new figure is created when None.
        save_path: when given, the figure is written to this path.
    """
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
                     self.jobs[0].meta['protein']), size=20)
    if experimental_data is None:
        experimental = self.experimental_data
    else:
        experimental = experimental_data[self.jobs[0].meta['protein']]
    energy_matrix = self.endstate_matrix.stack(0)['Energy'].unstack()
    # Strip the '[x]' marker so column names line up with the experimental index.
    col_substitutes = {columns: columns.replace('[x]', '') for columns in energy_matrix.columns}
    energy_matrix = energy_matrix.rename(columns=col_substitutes)
    shared_columns = list(set(energy_matrix.columns) & set(experimental.index))
    p = energy_matrix.loc[:, shared_columns].boxplot(ax=ax, return_type='dict')
    # BUGFIX: the original 'ax.set_xticks(rotation=90, size=18)' is an invalid
    # call (Axes.set_xticks requires tick positions) and raised TypeError;
    # tick_params is the supported way to rotate/resize the tick labels.
    ax.tick_params(axis='x', labelrotation=90, labelsize=18)
    setp(p['whiskers'], linewidth=2)
    # Whiskers come in pairs per box, hence the i/2 index into the box order.
    # NOTE(review): 'i/2' relies on Python 2 integer division; under Python 3
    # this must be 'i//2' -- confirm target interpreter before porting.
    setp([item for i,item in enumerate(p['whiskers']) if experimental[shared_columns].isnull()[i/2]], color='red')
    setp([item for i,item in enumerate(p['boxes']) if experimental[shared_columns].isnull()[i]], color='red')
    setp(p['boxes'], linewidth=4)
    setp(p['caps'], linewidth=4)
    ax.set_ylabel('Energy', size=20)
    ax.set_xlabel('Copolymer', size=20)
    inhib_leg = mpatches.Patch(color='red', label='Not inhibiting')
    non_inhib_leg = mpatches.Patch(color='blue', label='inhibiting')
    ax.legend(handles=[inhib_leg, non_inhib_leg], fontsize=22, loc=1)
    # fig.tight_layout()
    if save_path:
        plt.savefig(save_path)
def plot_distance_density(self, cumulative=False, max_distance_range=None, ax=None, save_path=None):
    """Overlay the distance probability densities of all polymer types."""
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
                     self.jobs[0].meta['protein']), size=20)
    # Each polymer type draws its own curve onto the shared axes.
    for type_name, polymer_type in self.polymer_types.items():
        polymer_type.distance_histogram(max_distance_range=max_distance_range, cumulative=cumulative, ax=ax)
    ax.set_xlabel('Distance $\AA$', size=20)
    ax.set_ylabel('Probability Density', size=20)
    ax.legend(loc=2, ncol=2)
    if save_path:
        plt.savefig(save_path)
def plot_results(self, min_dist_to_ac=5, save_path=None):
    """Overview figure: scatter plot, bar chart and distance density."""
    fig = plt.figure(figsize=(18,12))
    scatter_ax = plt.subplot2grid((2,2),(0,0))
    self.scatter_plot(ax=scatter_ax, min_dist_to_ac=min_dist_to_ac)
    bars_ax = plt.subplot2grid((2,2),(0,1))
    self.plot_bars(ax=bars_ax, distance_cutoff=min_dist_to_ac)
    # Density panel spans the full bottom row.
    density_ax = plt.subplot2grid((2,2),(1,0), colspan=2)
    self.plot_distance_density(ax=density_ax, max_distance_range=min_dist_to_ac)
    if save_path:
        plt.savefig(save_path)
def plot_bars(self, distance_cutoff=10, ax=None, save_path=None):
    """Bar chart comparing model binding probability with experimental ic50
    per polymer type, sorted by ic50 (descending).

    Parameters:
        distance_cutoff: distance (Angstrom) used for the model probability.
        ax: matplotlib axes to draw on; a new figure is created when None.
        save_path: when given, the figure is written to this path.
    """
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
                     self.jobs[0].meta['protein']), size=20)
    model_inhibition = self._inhibition_series(distance_cutoff)
    ic50_data = self._ic50_series()
    combined = pd.DataFrame(data={'model': model_inhibition, 'ic50': ic50_data})
    # BUGFIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in
    # 0.20; sort_values is the supported replacement with identical semantics.
    combined.sort_values(by='ic50', ascending=False, inplace=True)
    # Colors must be computed before the -1 markers are blanked out below.
    colors = self._bar_colors(combined)
    # set unknowns (marked with -1) to NaN
    combined.loc[combined['ic50']==-1, 'ic50'] = np.nan
    combined.plot(kind='bar', ax=ax, color=colors)
    ax.set_xlabel('Polymer Types', size=20)
    ax.set_ylabel('Inhibition/Binding', size=20)
    if save_path:
        plt.savefig(save_path)
def _bar_colors(self, data):
    """Return two colors per row (one for the ic50 bar, one for the model
    bar), keyed on the experimental status encoded in the ic50 column."""
    ic50_inhibitor_color = 'steelblue'
    inhibitor_color = 'g'
    non_inhibitors = 'r'
    unknowns = 'k'
    color_list = []
    for row_name, row in data.iterrows():
        if row.ic50 > 0:
            # Measured inhibitor: distinct colors for the two bars.
            color_list.extend([ic50_inhibitor_color, inhibitor_color])
        elif np.isnan(row.ic50):
            color_list.extend([non_inhibitors, non_inhibitors])
        else:
            color_list.extend([unknowns, unknowns])
    return color_list
def plot_experimental_model_comparison(self, distance, ax=None, save_path=None):
    """Scatter experimental ic50 values against the model's inhibition
    prediction for every polymer type.

    Parameters:
        distance: cutoff (Angstrom) for the model inhibition series.
        ax: matplotlib axes to draw on; a new figure is created when None.
        save_path: when given, the figure is written to this path.
    """
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
                     self.jobs[0].meta['protein']), size=20)
    model_inhibition = self._inhibition_series(distance)
    ic50_data = self._ic50_series()
    combined = pd.DataFrame(data={'model': model_inhibition, 'ic50': ic50_data})
    # BUGFIX: draw onto the prepared/passed axes. Previously 'ax' was created
    # and titled but never used -- pandas plotted onto a fresh implicit figure,
    # losing the title and ignoring a caller-supplied axis.
    combined.plot(kind='scatter', x='ic50', y='model', ax=ax)
    if save_path:
        plt.savefig(save_path)
def _ic50_series(self):
    """Return experimental ic50 values per polymer type, sorted descending.

    Types without a measurement carry the sentinel -1 (set on the
    polymer-type objects).
    """
    ic_50s = {poly_name: self.polymer_types[poly_name].ic50 for poly_name in self.polymer_types}
    # BUGFIX: Series.sort() was removed from pandas; sort_values replaces the
    # old sort(inplace=False, ascending=False) call with identical semantics.
    return pd.Series(ic_50s).sort_values(ascending=False)
def _inhibition_series(self, distance_cutoff):
    """Return the cumulative binding probability within distance_cutoff
    (Angstrom) for every polymer type, as a pandas Series."""
    # FIX: .items() instead of the Python-2-only .iteritems(), matching the
    # iteration style already used in plot_distance_density.
    data = {name: polytype.cumulative_binding_probability(distance_cutoff)
            for name, polytype in self.polymer_types.items()}
    return pd.Series(data)
def plot_inhibition(self, distance=10, ax=None, save_path=None):
    """Bar plot of the model-predicted inhibition per polymer type."""
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
                     self.jobs[0].meta['protein']), size=20)
    inhibition = self._inhibition_series(distance_cutoff=distance)
    inhibition.plot(kind='bar', ax=ax)
    if save_path:
        plt.savefig(save_path)
def plot_ic50(self, ax=None, save_path=None):
    """Bar plot of the experimentally measured ic50 values.

    Raises ValueError when no polymer type has a known measurement.
    """
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s (PDB: %s)' % (self.jobs[0].meta['protein_name'],
                     self.jobs[0].meta['protein']), size=20)
    ic50_values = self._ic50_series()
    # -1 is the sentinel for "no experimental value available".
    measured = ic50_values[ic50_values!=-1]
    if len(measured) == 0:
        raise ValueError('No PolymerTypes with known inhibition in project.')
    measured.plot(kind='bar', ax=ax)
    if save_path:
        plt.savefig(save_path)
class PolymerTypeSims(object):
def scatter_plot(self, ax=None, save_path=None, with_error=False, min_dist_to_ac=5):
'''create a scatter plot with the probability
of binding (x-axis) and the mean strength of binding
at the active site.
input:
ax: pyplot axis
save_path: [string]
output:
none
'''
if not ax:
fig, ax = plt.subplots(figsize=(18,12))
ax.set_title('Poly Type: %s | Protein %s (PDB: %s)' % (
self.name,
self.sims[0].meta['protein_name'],
self.sims[0].meta['protein'].upper()
),
size=20)
digested_results = pd.DataFrame()
if with_error:
# Per-simulation Bayesian results; the tuple is indexed as
# val[0]/val[1]/val[2] = (lower bound, point estimate, upper bound).
bayes_results_energy = {}
for sim in self.sims:
bayes_results_energy[sim.Id] = sim.bayes_results(model_name='energy')
digested_results['Energy'] = pd.Series({key: val[1] for key,val in bayes_results_energy.items()})
digested_results['EnergyErrMin'] = pd.Series({key: val[0] for key,val in bayes_results_energy.items()})
digested_results['EnergyErrMax'] = pd.Series({key: val[2] for key,val in bayes_results_energy.items()})
bayes_results_distance = {}
for sim in self.sims:
bayes_results_distance[sim.Id] = sim.bayes_results(model_name='distance')
digested_results['BindingProbability'] = pd.Series({key: val[1] for key,val in bayes_results_distance.items()})
digested_results['BindingProbabilityErrMin'] = pd.Series({key: val[0] for key,val in bayes_results_distance.items()})
digested_results['BindingProbabilityErrMax'] = pd.Series({key: val[2] for key,val in bayes_results_distance.items()})
else:
data = self.data_frame()
# NOTE(review): this division relies on Python 2 integer semantics;
# under Python 3 it should be '//' -- confirm before porting.
number_of_polymer_sims = data.shape[0]/len(data.index.get_level_values(0).unique())
near_active_site = data.stack(0).loc[data.stack(0)['Distance']<min_dist_to_ac, :].unstack(0)
digested_results['Energy'] = near_active_site['Energy'].mean()
# Fraction of simulations that ended within the cutoff distance.
digested_results['BindingProbability'] = near_active_site['Energy'].count()/number_of_polymer_sims
if self.project.parameters:
# Color points by polymer charge when project parameters are available.
digested_results['charge'] = pd.Series({sim.Id: sim.charge for sim in self.sims})
digested_results.plot(kind='scatter', x='BindingProbability', y='Energy',
ax=ax, c=digested_results['charge'], s=100, cmap='gist_rainbow')
else:
digested_results.plot(kind='scatter', x='BindingProbability', y='Energy',
ax=ax, s=100)
if with_error:
ax.errorbar(digested_results['BindingProbability'] ,digested_results['Energy'],
xerr=[digested_results['BindingProbabilityErrMin'], digested_results['BindingProbabilityErrMax']],
yerr=[digested_results['EnergyErrMin'], digested_results['EnergyErrMax']],
fmt=' ', color='grey', zorder=-1)
ax.set_ylabel('Energy', size=25)
ax.set_xlabel('Binding Probability within %.2fA to Active Site' % round(min_dist_to_ac,1), size=20)
ax.set_xlim([0.0-0.2*digested_results['BindingProbability'].max(), 1.2*digested_results['BindingProbability'].max()])
if save_path:
plt.savefig(save_path)
def distance_histogram(self, max_distance_range=None, cumulative=False, ax=None, save_path=None):
# Plot this polymer type's distance density (optionally cumulative),
# styled by experimental inhibition status.
# discretised bin container - hopefully nothing higher than
# 200 Angstrom comes along
if not ax:
fig, ax = plt.subplots(figsize=(18,12))
ax.set_title('%s (PDB: %s)' % (self.sims[0].meta['protein_name'], self.sims[0].meta['protein']), size=20)
if self.ic50:
# NOTE(review): elsewhere -1 marks "no experimental value", yet here
# any negative ic50 is labeled 'non-inhibiting' -- confirm intent.
if self.ic50 >= 0:
line_style = '-'
tag = 'inhibiting'
else:
line_style = ':'
tag = 'non-inhibiting'
else:
line_style = '-'
tag = ' '
if not max_distance_range:
max_distance_range = self.distance_probability['distance'].shape[0]
else:
# assumes 0.1-Angstrom bins, so *10 converts Angstrom to a bin
# index -- TODO confirm against the histogram construction
max_distance_range = int(max_distance_range*10)
if cumulative:
density = num_.cumulative_bins(self.distance_probability['density'])
else:
density = self.distance_probability['density']
ax.plot(self.distance_probability['distance'][:max_distance_range],
density[:max_distance_range],
alpha=0.6, linestyle=line_style, lw=3,
label='%s - %s' % (self.name, tag) )
ax.set_xlabel('Distance [$\AA$]')
ax.set_ylabel('Probability')
if save_path:
plt.savefig(save_path)
class Run(object):
def plot_energies(self, ax=None, save_path=None):
# Plot total/potential/kinetic energy over time, with binding energy on
# a twin y-axis of its own scale.
if not ax:
fig, ax = plt.subplots(figsize=(18,12))
ax.set_title('%s - %s - Job Id: %d - Run Id: %d' % (self.meta()['protein'],
self.meta()['poly_name'],
self.job.Id, self.Id), size=20)
else:
ax.set_title('Energies of Run %d' % self.Id , size=25)
total_ene_data = self.total_energy()
# Column 1 holds the step index; scaled here to picoseconds (see the
# 'Time [ps]' label below).
sim_time_in_ps = total_ene_data[:,1]/1000 * self.job.lmp_parameters['timestep']
ax.plot(sim_time_in_ps, self.total_energy()[:,0], label='Total Energy')
ax.plot(sim_time_in_ps, self.potential_energy()[:,0], label='Potential Energy')
ax.plot(sim_time_in_ps, self.kinetic_energy()[:,0], label='Kinetic Energy')
ax.set_ylabel('Energy', size=20)
ax.set_xlabel('Time [ps]', size=20)
legend1_info = ax.get_legend_handles_labels()
ax2 = ax.twinx()
ax2.plot(sim_time_in_ps, self.binding_energy()[:,0], color='m', label='Binding Energy')
ax2.set_ylabel('Binding Energy', size=20)
ax2.tick_params('y', color='m')
legend2_info = ax2.get_legend_handles_labels()
# Merge handle/label pairs from both axes into one combined legend.
ax2.legend(legend1_info[0]+legend2_info[0], legend1_info[1]+legend2_info[1],)
if save_path:
plt.savefig(save_path)
def plot_temperature(self, ax=None, save_path=None):
    """Plot the temperature trace of this run over simulation time.

    Parameters:
        ax: matplotlib axes to draw on; a new titled figure is created
            when None.
        save_path: when given, the figure is written to this path.
    """
    if not ax:
        fig, ax = plt.subplots(figsize=(18,12))
        ax.set_title('%s - %s - Job Id: %d - Run Id: %d' % (self.meta()['protein'],
                     self.meta()['poly_name'],
                     self.job.Id, self.Id), size=20)
    else:
        ax.set_title('Temperature of Run %d' % self.Id , size=25)
    data = self.temperature()
    # NOTE(review): plot_energies divides the step column by 1000 before
    # scaling by the timestep; confirm whether the same conversion is needed
    # here for the '[ps]' x-label to be correct.
    time_series = data[:,1] * self.job.lmp_parameters['timestep']
    ax.plot(time_series, data[:, 0], label='Temperature')
    # BUGFIX: corrected the user-facing axis-label typo 'Temperture'.
    ax.set_ylabel('Temperature [K]', size=20)
    ax.set_xlabel('Time [ps]', size=20)
    legend1_info = ax.get_legend_handles_labels()
    if save_path:
        plt.savefig(save_path)
| mit |
B3AU/waveTree | sklearn/linear_model/tests/test_omp.py | 5 | 8953 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import warnings
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils.fixes import count_nonzero
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
# Shared module-level fixture: a sparse-coded signal with known ground-truth
# coefficients `gamma`, reused by every test below.
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
# Precomputed Gram matrix X^T X and product X^T y for the *_gram variants.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
# Output shapes for single- and multi-target calls.
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
# Same shape contract for the precomputed-Gram code path.
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
# The sparsity budget is an upper bound on the number of nonzero coefs.
def test_n_nonzero_coefs():
assert_true(count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
# Stopping by residual tolerance: squared residual must not exceed tol.
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
# Direct and precomputed solvers must agree (coef budget variant).
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
# Direct and precomputed solvers must agree (tolerance variant).
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
# tol=0 cannot be reached: expect a warning and a full-support solution.
def test_unreachable_accuracy():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0, precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
assert_greater(len(w), 0) # warnings should be raised
# Invalid hyperparameters must raise ValueError on both code paths.
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
# OMP must recover the exact support and (approximately) the coefficients
# of the ground-truth sparse code.
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
# Estimator API: coef_/intercept_ shapes, sparsity bound, and deprecation
# warnings when Gram/Xy are passed to fit (expected count: 2 per call).
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
omp.fit(X, y[:, 0], Gram=G, Xy=Xy[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)
assert_true(len(w) == 2)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
omp.fit(X, y, Gram=G, Xy=Xy)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
assert_true(len(w) == 2)
# Cumulative warning counts as fit_intercept/normalize interact with a
# user-supplied Gram matrix.
def test_scaling_with_gram():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Use only 1 nonzero coef to be faster and to avoid warnings
omp1 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=False, normalize=False)
omp2 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=True, normalize=False)
omp3 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=False, normalize=True)
omp1.fit(X, y, Gram=G)
omp1.fit(X, y, Gram=G, Xy=Xy)
assert_true(len(w) == 3)
omp2.fit(X, y, Gram=G)
assert_true(len(w) == 5)
omp2.fit(X, y, Gram=G, Xy=Xy)
assert_true(len(w) == 8)
omp3.fit(X, y, Gram=G)
assert_true(len(w) == 10)
omp3.fit(X, y, Gram=G, Xy=Xy)
assert_true(len(w) == 13)
# Duplicated columns in the dictionary should trigger exactly one warning.
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
orthogonal_mp(newX, newy, 2)
assert_true(len(w) == 1)
# Internal column swapping must not corrupt which atoms are reported.
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
# A zero target must yield an all-zero solution on both code paths.
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
gamma_empty = orthogonal_mp(X, y_empty, 1)
gamma_empty_gram = orthogonal_mp_gram(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
# return_path=True: last slice of the path equals the final solution.
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
# CV variant must pick the true sparsity and match the plain estimator
# refit with that sparsity.
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
# With a full coefficient budget OMP must match ordinary least squares.
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
kipohl/ncanda-data-integration | scripts/redcap/scoring/brief/__init__.py | 2 | 9447 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
import os
import time
import datetime
import pandas
#
# Behavior Rating Inventory of Executive Function (BRIEF)
#
input_fields = { 'youthreport2' : [ 'youthreport2_brief_sec1_brief1', 'youthreport2_brief_sec1_brief2', 'youthreport2_brief_sec1_brief3', 'youthreport2_brief_sec1_brief4',
'youthreport2_brief_sec1_brief5', 'youthreport2_brief_sec1_brief6', 'youthreport2_brief_sec1_brief7', 'youthreport2_brief_sec2_brief8',
'youthreport2_brief_sec2_brief9', 'youthreport2_brief_sec2_brief10', 'youthreport2_brief_sec2_brief11', 'youthreport2_brief_sec2_brief12',
'youthreport2_brief_sec2_brief13', 'youthreport2_brief_sec2_brief14', 'youthreport2_brief_sec3_brief15', 'youthreport2_brief_sec3_brief16',
'youthreport2_brief_sec3_brief17', 'youthreport2_brief_sec3_brief18', 'youthreport2_brief_sec3_brief19', 'youthreport2_brief_sec3_brief20',
'youthreport2_brief_sec3_brief21', 'youthreport2_brief_sec4_brief22', 'youthreport2_brief_sec4_brief23', 'youthreport2_brief_sec4_brief24',
'youthreport2_brief_sec4_brief25', 'youthreport2_brief_sec4_brief26', 'youthreport2_brief_sec4_brief27', 'youthreport2_brief_sec4_brief28',
'youthreport2_brief_sec5_brief29', 'youthreport2_brief_sec5_brief30', 'youthreport2_brief_sec5_brief31', 'youthreport2_brief_sec5_brief32',
'youthreport2_brief_sec5_brief33', 'youthreport2_brief_sec5_brief34', 'youthreport2_brief_sec5_brief35', 'youthreport2_brief_sec6_brief36',
'youthreport2_brief_sec6_brief37', 'youthreport2_brief_sec6_brief38', 'youthreport2_brief_sec6_brief39', 'youthreport2_brief_sec6_brief40',
'youthreport2_brief_sec6_brief41', 'youthreport2_brief_sec6_brief42', 'youthreport2_brief_sec7_brief43', 'youthreport2_brief_sec7_brief44',
'youthreport2_brief_sec7_brief45', 'youthreport2_brief_sec7_brief46', 'youthreport2_brief_sec7_brief47', 'youthreport2_brief_sec7_brief48',
'youthreport2_brief_sec7_brief49', 'youthreport2_brief_sec8_brief50', 'youthreport2_brief_sec8_brief51', 'youthreport2_brief_sec8_brief52',
'youthreport2_brief_sec8_brief53', 'youthreport2_brief_sec8_brief54', 'youthreport2_brief_sec8_brief55', 'youthreport2_brief_sec8_brief56',
'youthreport2_brief_sec9_brief57', 'youthreport2_brief_sec9_brief58', 'youthreport2_brief_sec9_brief59', 'youthreport2_brief_sec9_brief60',
'youthreport2_brief_sec9_brief61', 'youthreport2_brief_sec9_brief62', 'youthreport2_brief_sec9_brief63', 'youthreport2_brief_sec10_brief64',
'youthreport2_brief_sec10_brief65', 'youthreport2_brief_sec10_brief66', 'youthreport2_brief_sec10_brief67', 'youthreport2_brief_sec10_brief68',
'youthreport2_brief_sec10_brief69', 'youthreport2_brief_sec10_brief70', 'youthreport2_brief_sec11_brief71', 'youthreport2_brief_sec11_brief72',
'youthreport2_brief_sec11_brief73', 'youthreport2_brief_sec11_brief74', 'youthreport2_brief_sec11_brief75', 'youthreport2_brief_sec11_brief76',
'youthreport2_brief_sec11_brief77', 'youthreport2_brief_sec12_brief78', 'youthreport2_brief_sec12_brief79', 'youthreport2_brief_sec12_brief80',
'youthreport2_date_interview' ] }
output_form = 'brief'
# Lookup tables ship next to this module; resolve them relative to the package
# so scoring works regardless of the current working directory.
module_dir = os.path.dirname(os.path.abspath(__file__))
# FIX/modernization: pandas.io.parsers.read_csv was a private alias removed in
# pandas 2.0; the public pandas.read_csv is the supported entry point with
# identical behavior for these arguments.
lookup_global = pandas.read_csv( os.path.join( module_dir, 'BRIEF_lookup_global_scales.csv' ), header=0, index_col=[0,1,2] )
lookup_subscales = pandas.read_csv( os.path.join( module_dir, 'BRIEF_lookup_subscales.csv' ), header=0, index_col=[0,1,2] )
lookup_index = pandas.read_csv( os.path.join( module_dir, 'BRIEF_lookup_index.csv' ), header=0, index_col=[0,1,2] )
# From the BRIEF VBA script - indexes the questions to the subscales
question_to_subscales = { 1: 1, 2: 3, 3: 6, 4: 7, 5: 4, 6: 8, 7: 5, 8: 9, 9: 2,
10: 1, 11: 3, 12: 6, 13: 7, 14: 4, 15: 8, 16: 5, 17: 9, 18: 2, 19: 1,
20: 9, 21: 6, 22: 7, 23: 4, 24: 8, 25: 5, 26: 9, 27: 2, 28: 1, 29: 7,
30: 6, 31: 7, 32: 4, 33: 8, 34: 5, 35: 9, 36: 2, 37: 1, 38: 9, 39: 6,
40: 7, 41: 4, 42: 8, 43: 5, 44: 9, 45: 2, 46: 1, 47: 7, 48: 6, 49: 7,
50: 4, 51: 8, 52: 6, 53: 9, 54: 1, 55: 3, 56: 6, 57: 7, 58: 4, 59: 8,
60: 7, 61: 1, 62: 3, 63: 6, 64: 7, 65: 4, 66: 1, 67: 3, 68: 6, 69: 7,
70: 4, 71: 1, 72: 9, 73: 6, 74: 7, 75: 4, 76: 1, 77: 9, 78: 6, 79: 1,
80: 1 }
# From the BRIEF VBA script - indexes the negativity questions
negativity_questions = [ 10, 11, 17, 19, 25, 30, 32, 43, 45, 54 ]
# From the BRIEF VBA script - indexes the inconsistency questions
inconsistency_questions = [ ( 8,26), (14,32), (20,77), (23,41), (38,72), (46,79), (55,67), (56,68), (58,65), (63,73) ]
# Score labels
labels = [ "", "brief_inhibit", "brief_beh_shift", "brief_cog_shift", "brief_control", "brief_monitor", "brief_memory", "brief_plan", "brief_materials", "brief_task", "brief_shift", "brief_bri", "brief_mi", "brief_gec" ]
def compute_scores( data, demographics ):
# Eliminate all records with missing data
data = data.dropna()
# Initialize raw scores as 0
for idx in range(1,11):
data[ labels[idx]+'_raw' ] = 0
for question, scale in question_to_subscales.iteritems():
# Must add "+1" to each response because our scale is 0..2, whereas original BRIEF implementation is 1..3
data[ labels[scale]+'_raw' ] = data[ labels[scale]+'_raw' ] + (1+data[ input_fields['youthreport2'][question-1] ])
# Calculate summary raw scores
data[ labels[10]+'_raw' ] = data[ labels[2]+'_raw' ] + data[ labels[3]+'_raw' ]
data[ labels[11]+'_raw' ] = data[ labels[1]+'_raw' ] + data[ labels[10]+'_raw'] + data[ labels[4]+'_raw' ] + data[ labels[5]+'_raw' ]
data[ labels[12]+'_raw' ] = data[ labels[6]+'_raw' ] + data[ labels[7]+'_raw' ] + data[ labels[8]+'_raw' ] + data[ labels[9]+'_raw' ]
data[ labels[13]+'_raw' ] = data[ labels[11]+'_raw'] + data[ labels[12]+'_raw']
# What is each subject's age at test?
date_format_ymd = '%Y-%m-%d'
data['brief_age'] = 0.0
for key, row in data.iterrows():
dob = demographics['dob'][key[0]]
data['brief_age'][key] = (datetime.datetime.strptime( row['youthreport2_date_interview'], date_format_ymd ) - datetime.datetime.strptime( dob, date_format_ymd )).days / 365.242
# Compute negativity
data['brief_neg'] = 0
for idx in range(1,11):
# Have to count "2"s, because our scale is 0..2, whereas original implementation used scale 1..3
data['brief_neg'] = data['brief_neg'] + data[ input_fields['youthreport2'][negativity_questions[idx-1]-1] ].map( lambda x: 1 if x==2 else 0 )
# Compute inconsistency
data['brief_incon'] = 0
for idx in range(1,11):
data['brief_incon'] = data['brief_incon'] + (data[ input_fields['youthreport2'][inconsistency_questions[idx-1][0]-1] ] - data[ input_fields['youthreport2'][inconsistency_questions[idx-1][1]-1] ]).abs()
# Lookup from subscale
for idx in range(1,11):
data[ labels[idx]+'_t' ] = 0
data[ labels[idx]+'_p' ] = 0
for key, row in data.iterrows():
sex = demographics['sex'][key[0]]
lookup_key = ( 14 if row['brief_age'] < 15 else 15, 'F' if sex == 0 else 'M', row[ labels[idx]+'_raw' ] )
data[ labels[idx]+'_t' ][key] = lookup_subscales[labels[idx]+'_t'][lookup_key]
data[ labels[idx]+'_p' ][key] = lookup_subscales[labels[idx]+'_p'][lookup_key]
for idx in range(11,13):
data[ labels[idx]+'_t' ] = 0
data[ labels[idx]+'_p' ] = 0
for key, row in data.iterrows():
sex = demographics['sex'][key[0]]
lookup_key = ( 14 if row['brief_age'] < 15 else 15, 'F' if sex == 0 else 'M', row[ labels[idx]+'_raw' ] )
data[ labels[idx]+'_t' ][key] = lookup_index[labels[idx]+'_t'][lookup_key]
data[ labels[idx]+'_p' ][key] = lookup_index[labels[idx]+'_p'][lookup_key]
data[ labels[13]+'_t' ] = 0
data[ labels[13]+'_p' ] = 0
for key, row in data.iterrows():
sex = demographics['sex'][key[0]]
lookup_key = ( 14 if row['brief_age'] < 15 else 15, 'F' if sex == 0 else 'M', row[ labels[13]+'_raw' ] )
data[ labels[13]+'_t' ][key] = lookup_global[labels[13]+'_t'][lookup_key]
data[ labels[13]+'_p' ][key] = lookup_global[labels[13]+'_p'][lookup_key]
data['brief_complete'] = '1'
return data[['%s_%s' % (label,score) for score in ['raw','t','p'] for label in labels if label != '']+['brief_age','brief_neg','brief_incon','brief_complete']]
| bsd-3-clause |
tracierenea/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 78 | 6117 | #!/usr/bin/env python
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g, volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# we can ref the table by it's name and the data associated
# with a given kernel in name_reg by it's name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = xrange(len(name_reg))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/(M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
eramirem/astroML | examples/datasets/plot_LIGO_spectrum.py | 3 | 2513 | """
Plot the power spectrum of LIGO
-------------------------------
This compares the power spectrum computed using the raw FFT, and using
Welch's method (i.e. overlapping window functions that reduce noise).
The top panel shows the raw signal, which is the measurements of the
change in baseline length. The bottom panel shows the raw and smoothed
power spectrum, used by the LIGO team to characterize the noise
of the detector. The particular data used here is the injected
`Big Dog <http://www.ligo.org/news/blind-injection.php>`_ event.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from scipy import fftpack
from matplotlib import mlab
from astroML.datasets import fetch_LIGO_large
#------------------------------------------------------------
# Fetch the LIGO hanford data
data, dt = fetch_LIGO_large()
# subset of the data to plot
t0 = 646
T = 2
tplot = dt * np.arange(T * 4096)
dplot = data[4096 * t0: 4096 * (t0 + T)]
tplot = tplot[::10]
dplot = dplot[::10]
fmin = 40
fmax = 2060
#------------------------------------------------------------
# compute PSD using simple FFT
N = len(data)
df = 1. / (N * dt)
PSD = abs(dt * fftpack.fft(data)[:N / 2]) ** 2
f = df * np.arange(N / 2)
cutoff = ((f >= fmin) & (f <= fmax))
f = f[cutoff]
PSD = PSD[cutoff]
f = f[::100]
PSD = PSD[::100]
#------------------------------------------------------------
# compute PSD using Welch's method -- hanning window function
PSDW2, fW2 = mlab.psd(data, NFFT=4096, Fs=1. / dt,
window=mlab.window_hanning, noverlap=2048)
dfW2 = fW2[1] - fW2[0]
cutoff = (fW2 >= fmin) & (fW2 <= fmax)
fW2 = fW2[cutoff]
PSDW2 = PSDW2[cutoff]
#------------------------------------------------------------
# Plot the data
fig = plt.figure()
fig.subplots_adjust(bottom=0.1, top=0.9, hspace=0.3)
# top panel: time series
ax = fig.add_subplot(211)
ax.plot(tplot, dplot, '-k')
ax.set_xlabel('time (s)')
ax.set_ylabel('$h(t)$')
ax.set_ylim(-1.2E-18, 1.2E-18)
# bottom panel: hanning window
ax = fig.add_subplot(212)
ax.loglog(f, PSD, '-', c='#AAAAAA')
ax.loglog(fW2, PSDW2, '-k')
ax.text(0.98, 0.95, "Hanning (cosine) window",
ha='right', va='top', transform=ax.transAxes)
ax.set_xlabel('frequency (Hz)')
ax.set_ylabel(r'$PSD(f)$')
ax.set_xlim(40, 2060)
ax.set_ylim(1E-46, 1E-36)
ax.yaxis.set_major_locator(plt.LogLocator(base=100))
plt.show()
| bsd-2-clause |
qiime2/q2-diversity | q2_diversity/tests/test_alpha.py | 2 | 20675 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import io
import os
import tempfile
import unittest
import biom
import skbio
import qiime2
from qiime2.plugin.testing import TestPluginBase
import numpy as np
import pandas as pd
import pandas.testing as pdt
from qiime2 import Artifact
from q2_diversity import (alpha_correlation, alpha_group_significance)
class AlphaTests(TestPluginBase):
package = 'q2_diversity.tests'
def setUp(self):
super().setUp()
self.alpha = self.plugin.pipelines['alpha']
self.alpha_phylogenetic = self.plugin.pipelines['alpha_phylogenetic']
empty_table = self.get_data_path('empty.biom')
self.empty_table = Artifact.import_data('FeatureTable[Frequency]',
empty_table)
two_feature_table = self.get_data_path('two_feature_table.biom')
self.two_feature_table = Artifact.import_data(
'FeatureTable[Frequency]',
two_feature_table)
three_feature_tree = self.get_data_path('three_feature.tree')
self.three_feature_tree = Artifact.import_data('Phylogeny[Rooted]',
three_feature_tree)
t = biom.Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.t = Artifact.import_data('FeatureTable[Frequency]', t)
tree = skbio.TreeNode.read(io.StringIO(
'((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
self.tree = Artifact.import_data('Phylogeny[Rooted]', tree)
def test_alpha(self):
actual = self.alpha(table=self.t, metric='observed_features')
actual = actual[0].view(pd.Series)
# expected computed by hand
expected = pd.Series({'S1': 1, 'S2': 2, 'S3': 2},
name='observed_features')
pdt.assert_series_equal(actual, expected)
def test_alpha_with_passthrough_metric(self):
actual = self.alpha(table=self.t, metric='singles')
actual = actual[0].view(pd.Series)
# expected computed by hand
expected = pd.Series({'S1': 1, 'S2': 2, 'S3': 0},
name='singles')
pdt.assert_series_equal(actual, expected)
def test_alpha_phylo_metric(self):
with self.assertRaisesRegex(TypeError, 'faith_pd.*incompatible'):
self.alpha(table=self.t, metric='faith_pd')
def test_alpha_unknown_metric(self):
with self.assertRaisesRegex(TypeError, 'not-a-metric.*incompatible'):
self.alpha(table=self.t, metric='not-a-metric')
def test_alpha_empty_table(self):
with self.assertRaisesRegex(ValueError, "empty"):
self.alpha(table=self.empty_table, metric='observed_features')
def test_alpha_phylogenetic(self):
actual = self.alpha_phylogenetic(table=self.two_feature_table,
phylogeny=self.three_feature_tree,
metric='faith_pd')
actual = actual[0].view(pd.Series)
# expected computed with skbio.diversity.alpha_diversity
expected = pd.Series({'S1': 0.75, 'S2': 1.0, 'S3': 1.0},
name='faith_pd')
pdt.assert_series_equal(actual, expected)
def test_alpha_phylogenetic_non_phylo_metric(self):
with self.assertRaisesRegex(TypeError,
'observed_features.*incompatible'):
self.alpha_phylogenetic(table=self.two_feature_table,
phylogeny=self.three_feature_tree,
metric='observed_features')
def test_alpha_phylogenetic_unknown_metric(self):
with self.assertRaisesRegex(TypeError, 'not-a-metric.*incompatible'):
self.alpha_phylogenetic(table=self.two_feature_table,
phylogeny=self.three_feature_tree,
metric='not-a-metric')
def test_alpha_phylogenetic_empty_table(self):
with self.assertRaisesRegex(ValueError, "empty"):
self.alpha_phylogenetic(table=self.empty_table,
phylogeny=self.three_feature_tree,
metric='faith_pd')
class AlphaCorrelationTests(unittest.TestCase):
def test_spearman(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, 3.0]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_correlation(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
jsonp_fp = os.path.join(output_dir, 'column-value.jsonp')
self.assertTrue(os.path.exists(jsonp_fp))
with open(jsonp_fp) as jsonp_fh:
jsonp_content = jsonp_fh.read()
self.assertTrue('Spearman' in jsonp_content)
self.assertTrue('"sampleSize": 3' in jsonp_content)
self.assertTrue('"data":' in jsonp_content)
self.assertFalse('filtered' in jsonp_content)
def test_pearson(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, 3.0]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_correlation(output_dir, alpha_div, md, method='pearson')
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
jsonp_fp = os.path.join(output_dir, 'column-value.jsonp')
self.assertTrue(os.path.exists(jsonp_fp))
with open(jsonp_fp) as jsonp_fh:
jsonp_content = jsonp_fh.read()
self.assertTrue('Pearson' in jsonp_content)
self.assertTrue('"sampleSize": 3' in jsonp_content)
self.assertTrue('"data":' in jsonp_content)
self.assertFalse('filtered' in jsonp_content)
def test_bad_method(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, 3.0]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
with self.assertRaises(ValueError):
alpha_correlation(output_dir, alpha_div, md, method='bad!')
def test_non_numeric_metadata(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'col1': [4, 5, 6],
'col2': ['a', 'b', 'c']},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_correlation(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
self.assertTrue(os.path.exists(
os.path.join(output_dir,
'column-col1.jsonp')))
self.assertFalse(os.path.exists(
os.path.join(output_dir,
'column-col2.jsonp')))
with open(index_fp) as index_fh:
index_content = index_fh.read()
self.assertTrue('contain numeric data' in index_content)
self.assertTrue('<strong>col2' in index_content)
def test_nan_metadata(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, np.nan]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_correlation(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
jsonp_fp = os.path.join(output_dir, 'column-value.jsonp')
self.assertTrue(os.path.exists(jsonp_fp))
with open(jsonp_fp) as jsonp_fh:
jsonp_content = jsonp_fh.read()
self.assertTrue('"filtered": 2' in jsonp_content)
self.assertTrue('"initial": 3' in jsonp_content)
def test_extra_metadata(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 4.0]},
index=pd.Index(['sample1', 'sample2', 'sample3',
'sample4'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_correlation(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
jsonp_fp = os.path.join(output_dir, 'column-value.jsonp')
self.assertTrue(os.path.exists(jsonp_fp))
with open(jsonp_fp) as jsonp_fh:
self.assertTrue('"sampleSize": 3' in jsonp_fh.read())
def test_extra_alpha_div_no_intersect(self):
alpha_div = pd.Series([2.0, 4.0, 6.0, 8.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3',
'sample4'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, 3.0]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
with self.assertRaisesRegex(ValueError,
'not present.*metadata.*sample4'):
alpha_correlation(output_dir, alpha_div, md)
def test_extra_alpha_div_intersect(self):
alpha_div = pd.Series([2.0, 4.0, 6.0, 8.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3',
'sample4'])
md = qiime2.Metadata(
pd.DataFrame(
{'value': [1.0, 2.0, 3.0]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_correlation(output_dir, alpha_div, md, intersect_ids=True)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
jsonp_fp = os.path.join(output_dir, 'column-value.jsonp')
self.assertTrue(os.path.exists(jsonp_fp))
def test_all_metadata_columns_filtered(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
# Non-numeric and empty columns are filtered.
md = qiime2.Metadata(
pd.DataFrame(
{'col1': ['a', 'b', 'a'],
'col2': [np.nan, np.nan, np.nan]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
with self.assertRaisesRegex(
ValueError, 'contains only non-numeric or empty columns'):
alpha_correlation(output_dir, alpha_div, md)
class AlphaGroupSignificanceTests(unittest.TestCase):
def test_alpha_group_significance(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'a or b': ['a', 'b', 'b']},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
self.assertTrue(os.path.exists(
os.path.join(output_dir,
'column-a%20or%20b.jsonp')))
with open(index_fp) as index_fh:
index_content = index_fh.read()
self.assertTrue('Kruskal-Wallis (all groups)' in index_content)
self.assertTrue('Kruskal-Wallis (pairwise)' in index_content)
def test_alpha_group_significance_some_numeric(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'a or b': ['a', 'b', 'b'],
'bad': [1.0, 2.0, 3.0]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
self.assertTrue(os.path.exists(
os.path.join(output_dir,
'column-a%20or%20b.jsonp')))
self.assertFalse(os.path.exists(
os.path.join(output_dir,
'column-bad.jsonp')))
with open(index_fp) as index_fh:
index_content = index_fh.read()
self.assertTrue('contain categorical data:' in index_content)
self.assertTrue('<strong>bad' in index_content)
def test_alpha_group_significance_one_group_all_unique_values(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'a or b': ['a', 'b', 'b'],
'bad': ['x', 'y', 'z']},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
self.assertTrue(os.path.exists(
os.path.join(output_dir,
'column-a%20or%20b.jsonp')))
self.assertFalse(os.path.exists(
os.path.join(output_dir,
'column-bad.jsonp')))
with open(index_fp) as index_fh:
index_content = index_fh.read()
self.assertTrue('number of samples' in index_content)
self.assertTrue('<strong>bad' in index_content)
def test_alpha_group_significance_one_group_single_value(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'a or b': ['a', 'b', 'b'],
'bad': ['x', 'x', 'x']},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
self.assertTrue(os.path.exists(
os.path.join(output_dir,
'column-a%20or%20b.jsonp')))
self.assertFalse(os.path.exists(
os.path.join(output_dir,
'column-bad.jsonp')))
with open(index_fp) as index_fh:
index_content = index_fh.read()
self.assertTrue('single group' in index_content)
self.assertTrue('<strong>bad' in index_content)
def test_alpha_group_significance_KW_value_error(self):
alpha_div = pd.Series([2.0, 2.0, 3.0, 2.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3',
'sample4'])
md = qiime2.Metadata(
pd.DataFrame({'x': ['a', 'b', 'b', 'c']},
index=pd.Index(['sample1', 'sample2', 'sample3',
'sample4'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
self.assertTrue(os.path.exists(
os.path.join(output_dir,
'column-x.jsonp')))
with open(index_fp) as index_fh:
index_content = index_fh.read()
self.assertTrue('pairwise group comparisons have been omitted'
in index_content)
self.assertTrue('x:c (n=1) vs x:a (n=1)' in index_content)
def test_alpha_group_significance_numeric_only(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'col1': [1, 2, 1],
'col2': [4.2, 4.2, 4.3]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
err_msg = ("does not contain any columns that satisfy this "
"visualizer's requirements")
with self.assertRaisesRegex(ValueError, err_msg):
alpha_group_significance(output_dir, alpha_div, md)
def test_alpha_group_significance_single_quote(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'a or b': ['a', "b'", "b'"]},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
with open(index_fp) as index_fh:
self.assertTrue("\'" in index_fh.read())
def test_alpha_group_significance_forward_slash_in_metadata_col(self):
alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
index=['sample1', 'sample2', 'sample3'])
md = qiime2.Metadata(
pd.DataFrame(
{'a/b': ['a', 'b', 'b']},
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
with tempfile.TemporaryDirectory() as output_dir:
alpha_group_significance(output_dir, alpha_div, md)
index_fp = os.path.join(output_dir, 'index.html')
with open(index_fp) as index_fh:
self.assertTrue("/" in index_fh.read())
jsonp_fp = os.path.join(output_dir, 'column-a%2Fb.jsonp')
self.assertTrue(os.path.exists(jsonp_fp))
csv_fp = os.path.join(output_dir,
'kruskal-wallis-pairwise-a%2Fb.csv')
self.assertTrue(os.path.exists(csv_fp))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
PG-TUe/tpot | tests/test_config_sparse.py | 4 | 1422 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
tpot_config = {
'sklearn.neighbors.KNeighborsClassifier': {
'n_neighbors': range(1, 101),
'weights': ["uniform", "distance"],
'p': [1, 2]
},
'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [100],
'criterion': ["gini", "entropy"],
'max_features': np.arange(0.05, 1.01, 0.05),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21),
'bootstrap': [True, False]
}
}
| lgpl-3.0 |
UNR-AERIAL/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-flavoured documentation renderer for plain functions."""

    def __init__(self, obj, doc=None, config=None):
        # BUGFIX: avoid the mutable default argument config={}, which is
        # shared (and mutable) across all calls; None-sentinel instead.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-flavoured documentation renderer for classes."""

    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # BUGFIX: avoid the mutable default argument config={}.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): func_doc is accepted but deliberately NOT
        # forwarded (ClassDoc receives func_doc=None); get_doc_object
        # passes SphinxFunctionDoc here, which therefore appears unused
        # -- confirm against upstream numpydoc before changing.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Wrap a plain docstring (for objects that are neither functions
    nor classes) in the Sphinx renderer."""
    def __init__(self, obj, doc=None, config=None):
        self._f = obj  # keep a reference to the documented object
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx documentation wrapper for *obj*.

    Parameters
    ----------
    obj : object
        The object to document.
    what : str, optional
        One of 'class', 'module', 'function', 'object'; inferred from
        *obj* when omitted.
    doc : str, optional
        Docstring override; for plain objects defaults to
        ``pydoc.getdoc(obj)``.
    config : dict, optional
        Renderer configuration (e.g. ``use_plots``).
    """
    # BUGFIX: avoid the mutable default argument config={}.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
aetilley/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
# NOTE(review): sklearn.cross_validation was deprecated in scikit-learn
# 0.18 and removed in 0.20; modern code imports train_test_split from
# sklearn.model_selection -- confirm target sklearn version.
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier

# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Binarize the output: one indicator column per class, so that a ROC
# curve can be computed per class (one-vs-rest).
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)

# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
# decision_function gives per-class scores used to sweep the threshold.
y_score = classifier.fit(X_train, y_train).decision_function(X_test)

# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Compute micro-average ROC curve and ROC area: treat every element of
# the label-indicator matrix as one binary prediction.
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

# Plot of a ROC curve for a specific class (class index 2)
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()

# Plot ROC curve: micro-average plus one curve per class
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]))
for i in range(n_classes):
    plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                                   ''.format(i, roc_auc[i]))

plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, e.g. those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and unit conversion. Use cases include converters for custom
objects, e.g. a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather, a units implementation must provide a ConversionInterface, and
then register it with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects:
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
    """Bundle of defaults for axis labeling and tick handling.

    Any attribute left as None makes the axis fall back to its default.
    """

    def __init__(self, majloc=None, minloc=None,
                 majfmt=None, minfmt=None, label=None):
        # majloc/minloc: TickLocators for the major and minor ticks.
        # majfmt/minfmt: TickFormatters for the major and minor ticks.
        # label: the default axis label.
        self.majloc, self.minloc = majloc, minloc
        self.majfmt, self.minfmt = majfmt, minfmt
        self.label = label
class ConversionInterface:
    """
    The minimal interface for a converter to take custom instances (or
    sequences) and convert them to values mpl can use
    """
    def axisinfo(unit):
        'return an units.AxisInfo instance for unit'
        return None
    axisinfo = staticmethod(axisinfo)

    def default_units(x):
        'return the default unit for x or None'
        return None
    default_units = staticmethod(default_units)

    def convert(obj, unit):
        """
        convert obj using unit.  If obj is a sequence, return the
        converted sequence.  The output must be a sequence of scalars
        that can be used by the numpy array layer
        """
        # Identity by default; concrete converters override this.
        return obj
    convert = staticmethod(convert)

    def is_numlike(x):
        """
        The matplotlib datalim, autoscaling, locators etc work with
        scalars which are the units converted to floats given the
        current unit.  The converter may be passed these floats, or
        arrays of them, even when units are set.  Derived conversion
        interfaces may opt to pass plain-ol unitless numbers through
        the conversion interface and this is a helper function for
        them.
        """
        # Subtlety: the bare name `is_numlike` below resolves to the
        # module-level matplotlib.cbook.is_numlike import, NOT to this
        # staticmethod (function bodies do not see the enclosing class
        # scope), so these calls do not recurse into this method.
        if iterable(x):
            for thisx in x:
                # Only the first element of a sequence is inspected.
                return is_numlike(thisx)
        else:
            return is_numlike(x)
    is_numlike = staticmethod(is_numlike)
class Registry(dict):
    """
    register types with conversion interface
    """
    def __init__(self):
        dict.__init__(self)
        self._cached = {}  # reserved for the disabled memoization below

    def get_converter(self, x):
        'get the converter interface instance for x, or None'
        if not len(self): return None # nothing registered
        #DISABLED idx = id(x)
        #DISABLED cached = self._cached.get(idx)
        #DISABLED if cached is not None: return cached

        converter = None
        # Look up the converter registered for x's class, if any.
        classx = getattr(x, '__class__', None)

        if classx is not None:
            converter = self.get(classx)

        # No class-level match: if x is a sequence, recurse into it.
        if converter is None and iterable(x):
            # if this is anything but an object array, we'll assume
            # there are no custom units
            # NOTE(review): np.object is a deprecated alias of the
            # builtin `object` in modern NumPy -- confirm target version.
            if isinstance(x, np.ndarray) and x.dtype != np.object:
                return None

            for thisx in x:
                converter = self.get_converter( thisx )
                # NOTE(review): returns after inspecting only the first
                # element -- the first element's converter decides for
                # the whole sequence; confirm this is intended.
                return converter

        #DISABLED self._cached[idx] = converter
        return converter

# Module-level singleton used by matplotlib's unit machinery.
registry = Registry()
| gpl-3.0 |
annaritz/fly-interactome | interactome/weighted-interactome/parameter-optimize.py | 1 | 4407 | from __future__ import print_function
import sys
import os
## NetworkX imports
import numpy as np
import networkx as nx
from graph import Graph
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob
A_PARAMS = [0.05, 0.1, 0.5, 1.0, 1.5, 2.0]
W_PARAMS = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
def main(network, weighted_network, outprefix, force):
    """Select optimal (a1, a2, w1, w2) edge-weighting parameters.

    Reads per-evidence-type scores from ``param-sweep-droid/``, combines
    them into a weighted log2 score ``f`` for every parameter
    combination, collects the combinations that maximize ``f``, then
    re-weights the network for each optimal combination and ranks them
    by the interquartile range (IQR) of the resulting edge weights.

    Parameters
    ----------
    network : str
        Path to the interactome file passed to weight-edges.py.
    weighted_network : str
        Path to the evidence-weighted interactome file.
    outprefix : str
        Prefix for the output directory and the iqr.txt summary file.
    force : bool
        Recompute the IQR file even if it already exists.
    """
    print('\nRead All Information from param-sweep-droid/.')
    param_dir = 'param-sweep-droid/'
    ev_file = 'param-sweep-droid-evtypes.txt'

    ev_types = {}  # {ev_type: (tot, overlap)}
    max_val = 0
    with open(ev_file) as fin:
        for line in fin:
            if line[0] == '#':  # skip comment lines
                continue
            row = line.strip().split()
            row[0] = row[0].replace(':', '-')
            ev_types[row[0]] = (int(row[1]), int(row[2]))
            # BUGFIX: accumulate the running maximum ratio.  The original
            # computed max(0, ratio) each iteration, which discarded the
            # previous maximum and ended up holding only the *last* row's
            # ratio.  Also use explicit float division (int/int floors
            # under Python 2, where only print_function is imported).
            max_val = max(max_val, float(row[1]) / float(row[2]))

    # normalize values to be between 0 and 1:
    ev_type_weights = {}  # {ev_type: weight for f}
    for e in ev_types:
        ev_type_weights[e] = (float(ev_types[e][0]) / float(ev_types[e][1])) / max_val
    print('%d evidence types' % (len(ev_types)))

    scores = {}  # scores[ev_type][a1][a2] -> W_PARAMS x W_PARAMS matrix
    for ev_type in ev_types:
        print(' reading in %s (weight = %.4f)' % (ev_type, ev_type_weights[ev_type]))
        scores[ev_type] = {}
        for a1 in A_PARAMS:
            scores[ev_type][a1] = {}
            for a2 in A_PARAMS:
                scorefile = param_dir+'/'+ev_type+'/a1_%.3f_a2_%.3f_scores.txt' % (a1, a2)
                scores[ev_type][a1][a2] = np.loadtxt(scorefile, skiprows=1,
                                                     usecols=range(1, len(W_PARAMS)+1))

    print('\nCompute f for every parameter combination:')
    max_val = -10000000  # reused: now tracks the best combined score f
    min_val = 0
    tot_opts = 0
    f = {}  # f[a1][a2] -> weighted sum of log2 scores over evidence types
    for a1 in A_PARAMS:
        f[a1] = {}
        for a2 in A_PARAMS:
            f[a1][a2] = np.zeros((len(W_PARAMS), len(W_PARAMS)))
            for ev_type in ev_types:
                f[a1][a2] = np.add(f[a1][a2],
                                   ev_type_weights[ev_type]*np.log2(scores[ev_type][a1][a2]))
            tot_opts += len(W_PARAMS)*len(W_PARAMS)/2
            max_val = max(max_val, np.nanmax(f[a1][a2]))
            min_val = min(min_val, np.nanmin(f[a1][a2]))
    print('Min Value =', (min_val))
    print('Max Value =', (max_val))

    param_combos = set()  # set of (a1,a2,w1,w2) parameter combos that are optimal.
    for a1 in A_PARAMS:
        for a2 in A_PARAMS:
            for i in range(len(W_PARAMS)):
                for j in range(len(W_PARAMS)):
                    if f[a1][a2][i][j] == max_val:
                        print('OPTIMAL PARAM COMBO FOUNDS:', a1, a2, W_PARAMS[i], W_PARAMS[j])
                        param_combos.add((a1, a2, W_PARAMS[i], W_PARAMS[j]))
    print('%d parameter combinations out of %d are optimal.' % (len(param_combos), tot_opts))

    out_dir = outprefix+"-full-weights/"
    if not os.path.isdir(out_dir):
        val = os.system('mkdir %s' % (out_dir))
        if val != 0:
            sys.exit()

    print('\nCalculate weights and compute the interquartile range for every combo')
    if os.path.isfile(outprefix+'iqr.txt') and not force:
        print('File %s already exists -- skipping.' % (outprefix+'iqr.txt'))
    else:
        out = open(outprefix+'iqr.txt', 'w')
        out.write('#a1\ta2\tw1\tw2\tiqr\n')
        best_combo = None
        best_val = 0
        i = 0
        for a1, a2, w1, w2 in param_combos:
            print('i=%d:' % (i), a1, a2, w1, w2)
            i += 1
            cmd = 'python3 weight-edges.py -n %s -c -e %s -o %s --a1 %.3f --a2 %.3f --w1 %.3f --w2 %.3f > out.log' % \
                  (network, weighted_network, out_dir+'run', a1, a2, w1, w2)
            val = os.system(cmd)
            if val != 0:
                sys.exit()
            weight_file = out_dir+'run_w1_%.3f_w2_%.3f.txt' % (w1, w2)
            these_weights = np.loadtxt(weight_file, skiprows=0, usecols=(2,))
            q75, q25 = np.percentile(these_weights, [75, 25])
            iqr = q75 - q25
            out.write('%.3f\t%.3f\t%.3f\t%.3f\t%e\n' % (a1, a2, w1, w2, iqr))
            print(' q75=%f, q25=%f, IQR=%f' % (q75, q25, iqr))
            if iqr > best_val:
                best_val = iqr
                best_combo = (a1, a2, w1, w2)
                print(' BEST SO FAR')
            out.flush()
        out.close()
        print("BEST IQR IS ", best_val)
        # BUGFIX: report the *stored* best combination.  The original
        # printed the loop variables (a1, a2, w1, w2), i.e. whatever
        # combination happened to be processed last, not the best one.
        if best_combo is not None:
            print('BEST COMBO IS %.3f\t%.3f\t%.3f\t%.3f' % best_combo)
        print('wrote to '+outprefix+'iqr.txt')
    return
if __name__ == '__main__':
    # Command line: interactome file, weighted interactome file, output
    # prefix, plus an optional 4th argument (any value) that forces
    # recomputation of existing results.
    if len(sys.argv) != 4 and len(sys.argv) != 5:
        sys.exit('USAGE: python3 parameter_sweep.py <INTERACTOME> <WEIGHTED_INTERACTOME> <OUTPREFIX> <FORCE-optional>')
    network = sys.argv[1]
    weighted_network = sys.argv[2]
    outprefix = sys.argv[3]
    if len(sys.argv)==5:
        force=True
    else:
        force=False
    main(network,weighted_network,outprefix,force)
| gpl-3.0 |
savoirfairelinux/odoo | addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
    """Map between a data calendar and a padded "chart" calendar in which
    every weekday has equal length, for plotting purposes.

    NOTE(review): Python-2-only code (sys.maxint; map/filter used as
    lists; list-returning zip) -- do not run under Python 3 as-is.
    """
    def __init__(self, calendar):
        self.data_calendar = calendar
        self._create_chart_calendar()
        self.now = self.to_num(self.data_calendar.now)

    def to_datetime(self, xval):
        # xval is a chart-calendar working date; unwrap to datetime.
        return xval.to_datetime()

    def to_num(self, date):
        # Wrap a date as a chart-calendar WorkingDate (plot coordinate).
        return self.chart_calendar.WorkingDate(date)

    def is_free_slot(self, value):
        # A chart instant is "free" if the data calendar maps it to a
        # different working start time (i.e. it lies in padding).
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1 != dt2

    def is_free_day(self, value):
        # Same idea as is_free_slot, but at whole-day granularity.
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1.date() != dt2.date()

    def _create_chart_calendar(self):
        dcal = self.data_calendar
        ccal = self.chart_calendar = pcal.Calendar()
        ccal.minimum_time_unit = 1

        #pad worktime slots of calendar (all days should be equally long)
        slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
        day_sum = lambda day: slot_sum(dcal.get_working_times(day))

        max_work_time = max(map(day_sum, range(7)))

        #working_time should have 2/3
        # (integer division under Python 2 when max_work_time is an int)
        sum_time = 3 * max_work_time / 2

        #now create timeslots for ccal
        def create_time_slots(day):
            src_slots = dcal.get_working_times(day)
            slots = [0, src_slots, 24*60]
            slots = tuple(cbook.flatten(slots))
            slots = zip(slots[:-1], slots[1:])

            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time

            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()

            #distribute the non-working padding over the gaps, capped by
            #each gap's own length
            slots = []
            i = 0
            for l, s in non_slots:
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1

            slots.extend(src_slots)
            slots.sort()
            return slots

        # Track the smallest slot across the week for tick resolution.
        min_delta = sys.maxint
        for i in range(7):
            slots = create_time_slots(i)
            ccal.working_times[i] = slots
            min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))

        ccal._recalc_working_time()

        self.slot_delta = min_delta
        self.day_delta = sum_time
        self.week_delta = ccal.week_time

# Module-level default scale built from the default faces calendar.
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ihmeuw/vivarium | tests/framework/test_state_machine.py | 1 | 5788 | import pandas as pd
import numpy as np
from vivarium import InteractiveContext
from vivarium.framework.randomness import choice
from vivarium.framework.state_machine import Machine, State, Transition
def _population_fixture(column, initial_value):
class PopFixture:
@property
def name(self):
return f"test_pop_fixture_{column}_{initial_value}"
def setup(self, builder):
self.population_view = builder.population.get_view([column])
builder.population.initializes_simulants(self.inner, creates_columns=[column])
def inner(self, pop_data):
self.population_view.update(pd.Series(initial_value, index=pop_data.index))
return PopFixture()
def _even_population_fixture(column, values):
class pop_fixture:
@property
def name(self):
return "test_pop_fixture"
def setup(self, builder):
self.population_view = builder.population.get_view([column])
builder.population.initializes_simulants(self.inner, creates_columns=[column])
def inner(self, pop_data):
self.population_view.update(choice('start', pop_data.index, values))
return pop_fixture()
def test_transition():
    """A simulant in 'start' with one unconditional transition should be
    in 'done' after a single machine step."""
    done_state = State('done')
    start_state = State('start')
    start_state.add_transition(done_state)
    machine = Machine('state', states=[start_state, done_state])

    simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')])
    # First event time after the simulation clock's initial tick.
    event_time = simulation._clock.time + simulation._clock.step_size
    machine.transition(simulation.get_population().index, event_time)
    assert np.all(simulation.get_population().state == 'done')
def test_choice(base_config):
    """With two 50/50 transitions out of 'start', about half of a 10k
    population should land in 'a' after one step."""
    base_config.update({'population': {'population_size': 10000}})
    a_state = State('a')
    b_state = State('b')
    start_state = State('start')
    # Each simulant moves to 'a' or 'b' with probability 0.5 each.
    start_state.add_transition(a_state, probability_func=lambda agents: np.full(len(agents), 0.5))
    start_state.add_transition(b_state, probability_func=lambda agents: np.full(len(agents), 0.5))
    machine = Machine('state', states=[start_state, a_state, b_state])

    simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')],
                                    configuration=base_config)
    event_time = simulation._clock.time + simulation._clock.step_size
    machine.transition(simulation.get_population().index, event_time)
    a_count = (simulation.get_population().state == 'a').sum()
    # Rounded to one decimal to tolerate sampling noise.
    assert round(a_count/len(simulation.get_population()), 1) == 0.5
def test_null_transition(base_config):
    """With a 0.5 transition to 'a' and self-transitions allowed, about
    half of the population should remain in 'start'."""
    base_config.update({'population': {'population_size': 10000}})
    a_state = State('a')
    start_state = State('start')
    start_state.add_transition(a_state, probability_func=lambda agents: np.full(len(agents), 0.5))
    # The remaining probability mass becomes a stay-in-place transition.
    start_state.allow_self_transitions()

    machine = Machine('state', states=[start_state, a_state])

    simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')],
                                    configuration=base_config)
    event_time = simulation._clock.time + simulation._clock.step_size
    machine.transition(simulation.get_population().index, event_time)
    a_count = (simulation.get_population().state == 'a').sum()
    assert round(a_count/len(simulation.get_population()), 1) == 0.5
def test_no_null_transition(base_config):
    """With null transitions disallowed, the two 0.5 probabilities are
    renormalized so every simulant leaves 'start'; about half go to 'a'."""
    base_config.update({'population': {'population_size': 10000}})
    a_state = State('a')
    b_state = State('b')
    start_state = State('start')
    # Build transitions explicitly and disable the implicit null option.
    a_transition = Transition(start_state, a_state, probability_func=lambda index: pd.Series(0.5, index=index))
    b_transition = Transition(start_state, b_state, probability_func=lambda index: pd.Series(0.5, index=index))
    start_state.transition_set.allow_null_transition = False
    start_state.transition_set.extend((a_transition, b_transition))
    machine = Machine('state')
    machine.states.extend([start_state, a_state, b_state])

    simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')],
                                    configuration=base_config)
    event_time = simulation._clock.time + simulation._clock.step_size
    machine.transition(simulation.get_population().index, event_time)
    a_count = (simulation.get_population().state == 'a').sum()
    assert round(a_count/len(simulation.get_population()), 1) == 0.5
def test_side_effects():
    """Transition side effects fire only on transitions INTO DoneState:
    count goes 0 -> 1 (start->done), stays 1 (done->start), then -> 2."""
    class DoneState(State):
        @property
        def name(self):
            return "test_done_state"

        def setup(self, builder):
            super().setup(builder)
            self.population_view = builder.population.get_view(['count'])

        def _transition_side_effect(self, index, event_time):
            # Increment the 'count' column for everyone entering this state.
            pop = self.population_view.get(index)
            self.population_view.update(pop['count'] + 1)

    done_state = DoneState('done')
    start_state = State('start')
    start_state.add_transition(done_state)
    # Make the machine cycle: start -> done -> start -> ...
    done_state.add_transition(start_state)

    machine = Machine('state', states=[start_state, done_state])

    simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start'),
                                                _population_fixture('count', 0)])
    event_time = simulation._clock.time + simulation._clock.step_size
    machine.transition(simulation.get_population().index, event_time)
    assert np.all(simulation.get_population()['count'] == 1)
    # Second step moves done -> start, which has no side effect.
    machine.transition(simulation.get_population().index, event_time)
    assert np.all(simulation.get_population()['count'] == 1)
    machine.transition(simulation.get_population().index, event_time)
    assert np.all(simulation.get_population()['count'] == 2)
| gpl-3.0 |
natj/bender | bender.py | 1 | 18533 | import sys
sys.path.append('/Users/natj/projects/arcmancer/lib/')
import pyarcmancer as pyac
import numpy as np
import matplotlib as mpl
#import matplotlib.pyplot as plt
from pylab import *
from matplotlib import cm
#from scipy import interpolate
import scipy.interpolate as interp
from joblib import Parallel, delayed
import multiprocessing
# Matplotlib global styling for all figures produced by this script.
mpl.rc('font', family='serif')
mpl.rc('xtick', labelsize='small')
mpl.rc('ytick', labelsize='small')
gs = GridSpec(3, 3)
gs.update(hspace = 0.3)
#gs.update(wspace = 0.3)

num_cores = multiprocessing.cpu_count()
# NOTE(review): prints the tuple ("num of cores {}", N) with literal
# braces -- .format(num_cores) was never called.
print "num of cores {}", num_cores

mpl.rcParams['image.cmap'] = 'inferno'

##################################################
#Setup star
R_in = 12.0           # equatorial radius [km]
m_in = 1.4            # mass [Msun]
freq_in = 400.0       # spin frequency (converted via solar_mass_per_s below)
colat_in = 10.0       # observer inclination, fed to np.deg2rad below

spot_temp_in = 2.0    # spot temperature, scaled by kelvin_per_kev below
spot_ang_in = 10.0    # spot angular radius, converted deg -> rad below
spot_colat_in = 90.0  # spot colatitude, converted deg -> rad below

##################################################
#Planck units
## physical constants
## since c = G = 1, we get c^2/G = 0.677 Msun / km
L_to_M = 0.677
## c in kilometers per second
c = 299792.458
## G in km^3 kg^-1 second^-2
G = 1.327e11
## Stefan's constant in keV / (s cm^2 K^4)
sigma_sb = 35392.0
#pi from numpy
pi = np.pi

## conversion factors
kelvin_per_kev = 1.16045e7
km_per_kpc = 3.086e16
kg_per_Msun = 1.988435e30

# Operate in units where G = c = 1.
# Use units of solar masses
solar_mass_per_km = 0.6772
solar_mass_per_s = 2.03e5

#G=c=1 units (i.e., solar masses)
mass = m_in
R_eq = R_in * solar_mass_per_km / mass
angvel = freq_in * 2.0*np.pi / solar_mass_per_s * mass

compactness = np.sqrt(1 - 2/R_eq)
print "compactness=",compactness

##################################################
#convert spot quantities
spot_temp = spot_temp_in * kelvin_per_kev
spot_colat = spot_colat_in * pi/180.0
spot_ang = spot_ang_in * pi/180.0

#define luminosity distance of NS = 10 kpc
#luminosity_distance = 10.0 * km_per_kpc * 1e3 / planck_length_in_m

#Setup pyarcmancer
##################################################
# Integrator tolerances and step-size policy for geodesic tracing.
conf = pyac.Configuration()

conf.absolute_tolerance = 1e-12 * R_eq
conf.relative_tolerance = 1e-12
conf.henon_tolerance = 1e-8
conf.sampling_interval = 1e-3
conf.minimum_stepsize = 1e-10 * R_eq
conf.maximum_steps = 10000
conf.enforce_maximum_stepsize = False
conf.enforce_minimum_stepsize = True
conf.enforce_maximum_steps = True
conf.store_only_endpoints = True

#Define metric of the spacetime
#metric = pyac.SchwarzschildMetric(2.0*mass/R_eq)
metric = pyac.AGMMetric(R_eq, 1.0, angvel, pyac.AGMMetric.MetricType.agm_standard)

#ns_surface = pyac.AGMSurface(R_eq, 1.0, angvel, pyac.AGMSurface.SurfaceType.spherical)
ns_surface = pyac.AGMSurface(R_eq, 1.0, angvel, pyac.AGMSurface.SurfaceType.oblate)
surfaces = [ ns_surface ]

#pyac.Log.set_console()
pyac.Log.set_file()

##################################################
# Construct image plane
inclination = np.deg2rad(colat_in)
distance = mass * 100  # observer distance, in the same G=c=1 units

# Half-extents of the image plane, slightly larger than the star.
x_span = 1.5*R_eq
y_span = 1.5*R_eq

x_bins = 200
y_bins = 200

pixel_dx = 2*x_span / x_bins
pixel_dy = 2*y_span / y_bins
pixel_area = pixel_dx * pixel_dy

# Pixel-center coordinates used by the interpolation stage below.
initial_xs = np.linspace(-x_span, x_span, x_bins)
initial_ys = np.linspace(-y_span, y_span, y_bins)
# construct local spherical axes in cartesian coordinates
def local_spherical_axes(pos_cart):
    """Return [u_r, u_theta, u_phi]: Cartesian components of the local
    spherical unit vectors at Cartesian position *pos_cart*."""
    e_r = pos_cart / np.linalg.norm(pos_cart)

    # Azimuthal direction: z-axis crossed with the radial direction.
    e_phi = np.cross(np.array([0, 0, 1]), e_r)
    e_phi = e_phi / np.linalg.norm(e_phi)

    # Polar direction completes the right-handed (r, theta, phi) triad.
    e_theta = -np.cross(e_r, e_phi)
    e_theta = e_theta / np.linalg.norm(e_theta)

    return [e_r, e_theta, e_phi]
# in the limit m -> 0, BL coordinates go to oblate spheroidal minkowski. These
# go to minkowski for r -> inf or a ->0
def boyer_lindquist_position(x_cart):
    """Map a Cartesian 3-position to Boyer-Lindquist [t, r, theta, phi]
    with t = 0 (valid in the flat-space / m -> 0 limit)."""
    x, y, z = x_cart
    radius = np.linalg.norm(x_cart)
    return np.array([0,
                     radius,
                     np.arccos(z / radius),
                     np.arctan2(y, x)])
# in the limit m = 0
def cartesian_position(a, x_bl):
    """Inverse map (m = 0 limit): oblate-spheroidal/BL coordinates with
    spin parameter *a* back to Cartesian [t, x, y, z]."""
    r, theta, phi = x_bl[1:]
    rho = np.hypot(r, a)  # spheroidal radius sqrt(r^2 + a^2)
    sin_theta = np.sin(theta)
    return np.array([x_bl[0],
                     rho * sin_theta * np.cos(phi),
                     rho * sin_theta * np.sin(phi),
                     r * np.cos(theta)])
# Initialize photon with some (x,y) coordinates in the _image plane_
# and make it point towards the neutron star
def xy2geo(metric, distance, inclination, x, y):
    """Initialize a null geodesic for image-plane point (x, y), positioned
    at *distance* along the observer direction and aimed at the star."""
    #print "Initializing geodesic for {},{}".format(x,y)
    # get coordinates for position of image plane point
    # normal = unit vector from star towards the observer.
    normal = np.array([np.sin(inclination), 0.0, np.cos(inclination)])
    x_cart = np.array([-y*np.cos(inclination), x, y*np.sin(inclination)])
    #x_cart = np.array([ x, -y*np.cos(inclination), y*np.sin(inclination) ] )
    x_cart += distance * normal
    x_sph = boyer_lindquist_position(x_cart)

    # get velocity by projecting to local B-L axes
    # get axes in _cartesian_ coordinates
    u_r, u_theta, u_phi = local_spherical_axes(x_cart)
    vel_cart = normal
    # Spherical components; theta/phi parts are divided by the metric
    # scale factors r and r*sin(theta).
    vel_sph = np.array([0,
                        np.dot(u_r , vel_cart) ,
                        np.dot(u_theta , vel_cart) / x_sph[1],
                        np.dot(u_phi , vel_cart) / (x_sph[1] * np.sin(x_sph[2]))])

    # define vertical and horizontal (polarization frame of the geodesic)
    vert = pyac.normalize(metric, x_sph, np.array([0, 0, -1.0, 0]))
    vert_vel = pyac.project_along(metric, x_sph, vel_sph, vert)
    vert -= vert_vel
    vert = pyac.normalize(metric, x_sph, vert)

    horz = pyac.spatial_cross_product(
        metric, pyac.static_observer(metric, x_sph), vert, vel_sph)
    horz = pyac.normalize(metric, x_sph, horz)
    # NOTE(review): the next line overwrites the horz computed just above
    # with a normalized phi-direction vector, making the cross-product
    # computation dead code -- confirm which is intended.
    horz = pyac.normalize(metric, x_sph, np.array([0, 0, 0, 1.0]))

    if 0:
        # test
        print "vert", vert, "horz", horz
        print "vert.horz", metric.dot(x_sph, vert, horz)
        print "vert.u", metric.dot(x_sph, vert, vel_sph)
        print "horz.u", metric.dot(x_sph, horz, vel_sph)

    geo = pyac.Geodesic(metric, x_sph, vel_sph, vert, horz, pyac.VectorType.null)
    return geo
#polar coordinates to photon in image plane
def pol2geo(metric, distance, inclination, rad, chi):
    """Polar image-plane coordinates (rad, chi) -> photon geodesic.

    chi is measured from the +y axis, so x = rad*sin(chi), y = rad*cos(chi).
    """
    return xy2geo(metric, distance, inclination,
                  rad * np.sin(chi), rad * np.cos(chi))
# generate an image plane of geodesics
def generate_image_plane(metric, distance, inclination, x_span, y_span, x_bins, y_bins):
    """Build a list of (ix, iy, x, y, geodesic) tuples covering the image
    plane.

    NOTE(review): the x_span/y_span/x_bins/y_bins parameters are unused;
    the grid actually comes from the module-level initial_xs/initial_ys.
    """
    xs = initial_xs
    ys = initial_ys

    # avoid line of poles?
    #xs += 0.1*pixel_dx

    plane = []
    for ix, x in enumerate(xs):
        for iy, y in enumerate(ys):
            #Make photon from (x,y) coords
            geo = xy2geo(metric, distance, inclination, x, y)
            plane.append((ix, iy, x, y, geo))
    return plane
def compute_element(el, distance, metric, conf, surfaces):
    # el is an (ix, iy, x, y, geodesic) tuple; integrate its geodesic
    # backwards far enough (observer distance plus 5 stellar radii) to
    # reach the star, using the module integrator configuration.
    el[4].compute(-(distance + 5.0*R_eq), metric, conf, surfaces)
# black-body specific intensity in natural units (h = 1)
def bb_intensity(nu, T):
    """Planck specific intensity B_nu at frequency *nu* and temperature
    *T*, in natural units where h = 1."""
    return 2.0 * nu**3 / (np.exp(nu / T) - 1.0)
class Pixel:
    """Lightweight holder for one image-plane sample position."""

    # Class-level default grid indices (instances may overwrite them).
    ix = 0
    iy = 0

    def __init__(self, x, y):
        # Image-plane coordinates of the pixel center.
        self.x = x
        self.y = y
##################################################
#Find star radius boundaries
def find_boundaries(metric, distance, inclination, surfaces):
    """Bisect, for each of Nedge position angles chi, the image-plane
    radius at which a photon just grazes the stellar surface.

    Returns (chis, rlims): the sampled angles and the edge radii.
    Uses the module-level conf and pol2geo/compute_element helpers.
    """
    print "Finding edge boundaries for the star..."
    Nedge = 10
    chis = np.linspace(0.0, 1.0, Nedge)*2.0*pi + 0.001
    rlims = np.zeros(Nedge)

    # Bisection bracket in image-plane radius.
    rmin = 0.0
    rmax = 12.0

    for i, chii in enumerate(chis):
        geos = []
        rmini = rmin
        rmaxi = rmax
        rmid = 0.0

        Nbi = 20      # maximum bisection iterations
        N = 0
        #for N in range(Nbi):
        relerr = 1.0
        reltol = 1e-3
        rmid_old = 100.0
        while (N < Nbi) and (relerr > reltol):
            rmid = (rmini + rmaxi)/2.0
            geos.append((0,0,0,0, pol2geo(metric, distance, inclination, rmid, chii) ))
            compute_element(geos[N], distance, metric, conf, surfaces)

            # If the photon hits the surface, the edge lies further out.
            hit = geos[N][4].front_termination().hit_surface
            if hit:
                rmini = rmid
            else:
                rmaxi = rmid

            relerr = np.abs(rmid - rmid_old)/rmid
            rmid_old = rmid
            N += 1

        print "Iterating edge at {} after {} tries for angle={} ({})".format(rmid, N, chii, relerr)
        rlims[i] = rmid
    return chis, rlims
##################################################
# Creates internal polar grid for the interpolation
def internal_polar_grid(Nrad, Nchi):
    """Build a coarse polar (rad, chi) grid of traced geodesics inside
    the stellar image, for later spline interpolation.

    Uses module globals: rmax (edge radius), metric, distance,
    inclination, conf, surfaces, R_eq.  Returns (rad_grid, chi_grid,
    grid) where grid[j, i] is the computed geodesic at (rad_j, chi_i).
    """
    # Non-uniform chi spacing (denser near 0 and pi) with small
    # overshoots past [0, 2*pi] so the interpolator covers the seam.
    dchi_edge = 0.001
    chimin = 0.0 - dchi_edge
    chimax = 2.0*pi + dchi_edge
    chi_diffs = 0.8 + np.sin( np.linspace(0.0, 2.0*pi, Nchi-3) )**2
    chi_diffs = np.insert(chi_diffs, 0, 0.0)
    chi_grid = chimin + (chimax - chimin) * np.cumsum(chi_diffs)/np.sum(chi_diffs)
    chi_grid = np.insert(chi_grid, 0, chi_grid[0] - dchi_edge)
    chi_grid = np.append(chi_grid, chi_grid[-1] + dchi_edge)
    #chi_grid = np.linspace(0.0, 2.0*pi, Nchi)

    # Radial spacing denser towards the stellar limb.
    rad_diffs = 1.0 / np.exp( np.linspace(1.2, 2.0, Nrad-1)**2)
    rad_grid = rmax * np.cumsum(rad_diffs)/np.sum(rad_diffs)
    rad_grid = np.insert(rad_grid, 0, 0.001)

    grid = np.empty((Nrad,Nchi), dtype=np.object)
    for i, chi in enumerate(chi_grid):
        print "{} % done".format(float(i)/len(chi_grid) * 100)
        for j, rad in enumerate(rad_grid):
            print " tracing geodesic at chi={} and r={}".format(chi, rad)
            #Trace geodesic from image plane to star
            grid[j,i] = pol2geo(metric, distance, inclination, rad, chi)
            grid[j,i].compute(-(distance + 5.0*R_eq), metric, conf, surfaces)
    return rad_grid, chi_grid, grid
#Reduce geodesic into auxiliary quantities
def dissect_geos(grid, rad_grid, chi_grid):
    """Extract observable quantities from each traced geodesic.

    Returns five (Nrad, Nchi) arrays: redshift factor, cosine of the
    surface hit angle, and hit-point coordinate t, theta, phi.
    Uses module globals metric, ns_surface, pyac.
    """
    print "Dissecting geodesic paths to observed quantities..."
    Nrad, Nchi = np.shape(grid)
    Reds = np.zeros((Nrad, Nchi))
    Cosas = np.zeros((Nrad, Nchi))
    Times = np.zeros((Nrad, Nchi))
    Thetas = np.zeros((Nrad, Nchi))
    Phis = np.zeros((Nrad, Nchi))

    for i, chi in enumerate(chi_grid):
        #print "{} % done".format(float(i)/len(chi_grid) * 100)
        for j, rad in enumerate(rad_grid):
            geo = grid[j,i]
            # First stored point = surface hit, last = observer end.
            hit_pt = geo.get_points()[0]
            obs_pt = geo.get_points()[-1]

            #coordinates
            #surface_point.point.x
            t = Times[j,i] = hit_pt.point.x[0]
            th = Thetas[j,i] = hit_pt.point.x[2]
            p = Phis[j,i] = hit_pt.point.x[3]

            hit = geo.front_termination().hit_surface

            #Redshift: ratio of photon energy seen by the static observer
            #to that seen by the co-rotating surface observer.
            g = Reds[j,i] = \
                metric.dot(obs_pt.point, pyac.static_observer(metric, obs_pt.x())) / \
                metric.dot(hit_pt.point, ns_surface.observer(metric, hit_pt.x()))

            #hit angle
            cosa = Cosas[j,i] = \
                geo.front_termination().observer_hit_angle

    return Reds, Cosas, Times, Thetas, Phis
def mod2pi(x):
    """Wrap *x* into the closed interval [0, 2*pi] by repeated shifts.

    The loop form is kept deliberately (instead of fmod) so that
    x == 2*pi maps to itself rather than to 0.
    """
    two_pi = 2.0 * np.pi
    while x > two_pi:
        x -= two_pi
    while x < 0.0:
        x += two_pi
    return x
def calc_chi(x, y):
    """Position angle chi of image-plane point (x, y), measured from the
    +y axis and wrapped into [0, 2*pi]."""
    return mod2pi(0.5 * np.pi - np.arctan2(y, x))
    #return mod2pi( np.arctan2(y,x) )
#Interpolation function
# We take x and y grid values in polar coordinates
# New points (that we are interpolating into) are
# in Cartesian (x,y) coordinates.
# New points are then first transformed to polar coordinates for querying.
def grid_interpolate(rads, chis, z,
                     x2, y2,
                     edge_inter):
    """Interpolate values z sampled on a sparse polar (rads, chis) grid
    onto a dense Cartesian (x2, y2) grid.

    Query points are converted to polar form; points outside the stellar
    edge (given by edge_inter(chi)) are left at zero.
    """
    ##rads = np.hypot(x, y)
    ##chis = np.arctan2(y, x)
    ##rads2 = np.hypot(x2, y2)
    ##chis2 = np.arctan2(y2, x2)

    #x_sparse, y_sparse = np.meshgrid(rads, chis)
    ##y_sparse, x_sparse = np.meshgrid(rads, chis)
    ##x_dense, y_dense = np.meshgrid(rads2, chis2)

    ##new way of transforming only after the unpacking of mesh
    #x_dense, y_dense = np.meshgrid(x2, y2)

    #z2 = interp.griddata(np.array([x_sparse.ravel(),y_sparse.ravel()]).T,
    #                     z.ravel(),
    #                     (x_dense,y_dense), method='linear')

    #print np.shape(z)
    #print np.shape(rads), min(rads), max(rads)
    #print np.shape(chis), min(chis), max(chis)

    #Using spline; XXX is this a good idea, I dunno!
    # Quadratic (kx=ky=2) bivariate spline on the polar grid.
    ir = interp.RectBivariateSpline(rads, chis, z,
                                    #bbox=[0.01, max(rads), 0.0, 2*np.pi],
                                    kx=2, ky=2, s=0)

    #build interpolated array
    z2 = np.zeros((len(x2), len(y2)))
    for i, xi in enumerate(x2):
        for j, yi in enumerate(y2):
            radi = np.hypot(xi, yi)
            #chii = np.arctan2(yi, xi)
            chii = calc_chi(xi, yi)

            #Check radius: only evaluate inside the interpolated edge.
            redge = edge_inter(chii)
            if radi <= redge:
                z2[i,j] = ir.ev(radi, chii)
                #z2[i,j] = chii
    return z2
##################################################
##################################################
#Locate edges
# chis/rlims: angle samples along the image-plane edge and (presumably)
# the corresponding maximum visible radius for each angle — see
# find_boundaries defined earlier
chis, rlims = find_boundaries(metric, distance, inclination, surfaces)
#rlims = 8.04
rmax = np.max(rlims)*1.001
print "Maximum edge {}".format(rmax)
#Build edge location interpolator
edge_inter_raw = interp.InterpolatedUnivariateSpline(chis, rlims)
#Build internal coarse grid for the interpolation routines
rad_grid, chi_grid, coarse_polar_grid = internal_polar_grid(30, 30)
#Read observed values from geodesics
reds_int, cosas_int, times_int, thetas_int, phis_int = dissect_geos(coarse_polar_grid, rad_grid, chi_grid)
#Interpolate everything into thick grid
# (the dense Cartesian image plane given by initial_xs/initial_ys)
redshift = grid_interpolate(rad_grid, chi_grid, reds_int,
                            initial_xs, initial_ys, edge_inter_raw)
obs_hit_angle = grid_interpolate(rad_grid, chi_grid, cosas_int,
                                 initial_xs, initial_ys, edge_inter_raw)
times = grid_interpolate(rad_grid, chi_grid, times_int,
                         initial_xs, initial_ys, edge_inter_raw)
thetas = grid_interpolate(rad_grid, chi_grid, thetas_int,
                          initial_xs, initial_ys, edge_inter_raw)
phis = grid_interpolate(rad_grid, chi_grid, phis_int,
                        initial_xs, initial_ys, edge_inter_raw)
# Computes lineprofile
def lineprofile(fluxc, redsc):
    """Compute a normalized line profile from per-pixel flux and redshift.

    Pixels with redshift <= 0 (no surface hit) are ignored. The remaining
    flux values are binned over observed energy ``redsc * energ`` into 100
    uniform bins spanning [0.99*min, 1.01*max], and the histogram is
    normalized to unit integral.

    Parameters
    ----------
    fluxc, redsc : 2-d arrays of the same shape
        Flux weight and redshift factor per image-plane pixel.

    Returns
    -------
    es : 1-d array, length 100
        Energy bin edges (uniformly spaced).
    yy2 : 1-d array, length 100
        Normalized flux per bin; yy2[i] holds the flux in [es[i-1], es[i]),
        so yy2[0] is always 0 (matches the original accumulation loop).
    """
    print("Computing line profile...")
    energ = 1.0
    # vectorized selection replaces the original O(n^2) np.append loop
    mask = redsc > 0.0
    xarr = redsc[mask] * energ
    yarr = fluxc[mask]
    emin = np.min(xarr) * 0.99
    emax = np.max(xarr) * 1.01
    Nr = 100
    es = np.linspace(emin, emax, Nr)
    # np.histogram reproduces the original per-bin accumulation: all values
    # lie strictly inside (es[0], es[-1]), so edge inclusivity is moot
    hist, _ = np.histogram(xarr, bins=es, weights=yarr)
    yy2 = np.concatenate(([0.0], hist))
    # normalize to unit integral using the (uniform) bin width
    des = np.diff(es)[1]
    yy2 = yy2 / np.sum(yy2 * des)
    return es, yy2
# Line profile of the redshift map with flux weight g**3
# (presumably the relativistic boosting weight — confirm)
es, yy2 = lineprofile(redshift**3, redshift)
# express energies in units of the stellar compactness
es = es/compactness
##################################################
# plot values on image plane
def trans(mat):
    """Transpose *mat* and flip it vertically (image-display orientation)."""
    return np.flipud(np.transpose(mat))
def detrans(mat):
    """Inverse of trans(): undo the vertical flip, then transpose back."""
    return np.transpose(np.flipud(mat))
def clean_image(mat):
    """Mask all exactly-zero pixels (no surface hit) and reorient the
    array for display via trans()."""
    masked = np.ma.masked_where(mat == 0, mat)
    return trans(masked)
#build up a chess board layering from phi and theta coordinates
def chess_layer(phis, thetas):
    """Build a chessboard overlay from surface (phi, theta) coordinate maps.

    Each angle is quantized into 60 cells per 2*pi. A cell is painted black
    when the phi and theta cell indices have the same parity and white
    otherwise, producing an alternating chessboard. Pixels where both phi
    and theta are exactly zero (no surface hit) are marked 0.

    Returns a float array with values 0 (none), 1 (white), 2 (black).
    """
    none = 0
    white = 1
    black = 2
    Nx, Ny = np.shape(phis)
    mat = np.zeros((Nx, Ny))
    for i in range(Nx):
        for j in range(Ny):
            phi = phis[i, j]
            theta = thetas[i, j]
            if (phi == 0.0) and (theta == 0):
                mat[i, j] = none
                continue
            # cell indices; int() replaces the removed np.int alias, and
            # np.pi replaces the star-imported `pi`
            xd = int(60.0 * phi / (2 * np.pi))
            yd = int(60.0 * theta / (2 * np.pi))
            if (xd & 1) and (yd & 1):
                mat[i, j] = black
            elif not (xd & 1) and not (yd & 1):
                # BUG FIX: the original tested `(xd & 0) and (yd & 0)`,
                # which is always false, so even/even cells were painted
                # white and the pattern degenerated into stripes instead
                # of a chessboard.
                mat[i, j] = black
            else:
                mat[i, j] = white
    return mat
# transform everything to mesh with imshow
chess = chess_layer(phis, thetas)
# mask zero pixels (no surface hit) and reorient for imshow
obs_hit_angle = clean_image(obs_hit_angle)
redshift = clean_image(redshift)
times = clean_image(times)
thetas = clean_image(thetas)
phis = clean_image(phis)
chess = clean_image(chess)
# other settings for imshow
extent=( initial_xs[0], initial_xs[-1], initial_ys[0], initial_ys[-1])
interpolation = 'nearest'
# NOTE(review): `subplot`, `colorbar`, `savefig`, `cm` and the GridSpec `gs`
# come from a pylab-style import earlier in the script — confirm in scope.
# main panel: chessboard overlay + redshift map with white contours
ax = subplot(gs[0:2,0:2])
ax.axis('off')
ax.imshow(chess, interpolation=interpolation, extent=extent, cmap=cm.get_cmap('Greys'), vmin=0.8, vmax=2.0, alpha=0.6)
ax.imshow(redshift, interpolation=interpolation, origin='lower', extent=extent,
          cmap=cm.get_cmap('coolwarm_r'), vmin=0.8*compactness, vmax=1.2*compactness, alpha=0.95)
ax.contour(redshift, 10, hold='on', colors='w',
           origin='lower', extent=extent, vmin=0.8*compactness, vmax=1.2*compactness)
# emitter hit-angle panel
ax = subplot(gs[2,0])
ax.minorticks_on()
cax = ax.imshow(obs_hit_angle, interpolation=interpolation, extent=extent)
colorbar(cax)
ax.set_title(r'emitter angle $\alpha$')
# redshift panel with its own colorbar
ax = subplot(gs[2,1])
ax.minorticks_on()
cax = ax.imshow(redshift, interpolation=interpolation, origin='lower', extent=extent,
                cmap=cm.get_cmap('coolwarm_r'), vmin=0.8*compactness, vmax=1.2*compactness)
ax.contour(redshift, 20, hold='on', colors='w',
           origin='lower', extent=extent, vmin=0.8*compactness, vmax=1.2*compactness)
colorbar(cax)
ax.set_title('redshift')
# phi / theta coordinate map panels
ax = subplot(gs[0,2])
ax.minorticks_on()
cax = ax.imshow(phis, interpolation=interpolation, extent=extent)
colorbar(cax)
ax.set_title(r'$\phi$')
ax = subplot(gs[1,2])
ax.minorticks_on()
cax = ax.imshow(thetas, interpolation=interpolation, extent=extent)
colorbar(cax)
ax.set_title(r'$\theta$')
# line-profile panel (computed above from the redshift map)
ax = subplot(gs[2,2])
ax.plot(es, yy2, "b-")
ax.set_title(r'line profile')
#plt.tight_layout()
#plt.show()
savefig('arcmancer_debug.png')
| mit |
blaisb/cfdemUtilities | dem/rdfAnalysis/plotMultRdf.py | 1 | 1745 | # This plots the RDF of a LIGGGHTS post-processed analysis
#
# USAGE : python ./file
#
# Author : Bruno Blais
#Python imports
#----------------
import os
import sys
import numpy
import matplotlib.pyplot as plt
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
# also write the figure to ./rdf.pdf
pdf=True
# particle diameter used to non-dimensionalize the distance axis (x/dp)
dp=3e-3
dpAdim=True
# legend entries, one per input file (in argument order)
leg=["Initial configuration" , "3 s." , "6 s." , "10 s.", "30 s.", "50 s."]
#Figure size
plt.rcParams['figure.figsize'] = 10, 7
params = {'backend': 'ps',
          'axes.labelsize': 24,
          'text.fontsize': 16,
          'legend.fontsize': 18,
          'xtick.labelsize': 16,
          'ytick.labelsize': 16,
          'text.usetex': True,
          }
plt.rcParams.update(params)
#======================
# MAIN
#======================
tFold= 0
#Read the logs files
if (len(sys.argv)<2):
    print 'Files must be specified when running this python script'
    sys.exit("Crashed because files were not specified")
# initial read of the first file (columns: n, x, rdf, coordination number)
n,x,rdf,cord = numpy.loadtxt(sys.argv[1], unpack=True,skiprows=4)
# Plot
fig, ax1 = plt.subplots()
plt.ylabel('Radial distribution function ')
if(dpAdim):
    plt.xlabel('Dimensionless distance $\mathbf{x}/d_{p}$')
else:
    plt.xlabel('Distance $\mathbf{x}$')
ax1.set_ylabel('Radial distribution function g(r)')
for i in range(1,len(sys.argv)):
    print sys.argv[i]
    n,x,rdf,cord = numpy.loadtxt(sys.argv[i], unpack=True,skiprows=4)
    if (dpAdim):
        # the sixth file is highlighted in black
        if (i==6):
            ax1.plot(x/dp,rdf,'k',linewidth=2.0,label=leg[i-1])
        else:
            ax1.plot(x/dp,rdf,linewidth=2.0,label=leg[i-1])
    else:
        ax1.plot(x,rdf,linewidth=2.0)
# NOTE(review): the PDF is saved before plt.legend() is called, so the
# saved figure has no legend (only the interactive window does) — confirm
if (pdf): plt.savefig("./rdf.pdf")
plt.legend()
plt.show()
| lgpl-3.0 |
kianho/mpl_preamble | examples/stock_prices.py | 1 | 1615 | #!/usr/bin/env python
# encoding: utf-8
"""
Date:
Sun Feb 15 21:57:25 AEDT 2015
Author:
Kian Ho <hui.kian.ho@gmail.com>
Description:
TODO
Usage:
stock_prices.py
"""
from __future__ import print_function
import datetime
import mpl_preamble
from pandas.io import data
from mpl_preamble import mpl, pylab
import matplotlib.ticker as ticker
def ytick_format_func(x, pos):
    """matplotlib FuncFormatter callback: format a tick value with two
    decimal places.

    The *pos* argument is required by the FuncFormatter protocol but is
    not used here.
    """
    formatted = "%.2f" % x
    return formatted
if __name__ == '__main__':
    # NOTE(review): pandas.io.data was removed from modern pandas (moved
    # to the pandas-datareader package); this script targets an older pandas
    df = data.DataReader("FB", "google",
                         start="2013-01-01")
    xs = df.index.to_pydatetime()
    ys = df.Open
    # daily opening prices drawn as step series
    pylab.plot(xs, ys, drawstyle="steps", label="FB")
    df = data.DataReader("TWTR", "google",
                         start="2013-01-01")
    xs = df.index.to_pydatetime()
    ys = df.Open
    pylab.plot(xs, ys, drawstyle="steps", label="TWTR")
    ax = pylab.gca()
    # x-tick labels in the "%b '%y" python datetime string format.
    formatter = mpl.dates.DateFormatter("%b '%y")
    ax.xaxis.set_major_formatter(formatter)
    # one x-tick every 3 months (MonthLocator with interval=3).
    ax.xaxis.set_major_locator(mpl.dates.MonthLocator(interval=3))
    # y-tick labels in the "%.2f" format.
    ax.yaxis.set_major_formatter(ticker.FuncFormatter(ytick_format_func))
    ax.xaxis.grid(True, color="grey")
    ax.yaxis.grid(True, color="grey")
    # place the grid below each series line.
    ax.set_axisbelow(True)
    # rotate and right-justify the x-tick labels.
    pylab.xticks(rotation=30, ha="right")
    pylab.legend(loc="upper left", ncol=1)
    pylab.xlabel("Date")
    pylab.ylabel("Price (USD)")
    pylab.savefig("stock_prices.pdf", bbox_inches="tight")
| mit |
pletisan/python-data-viz-cookbook | 3367OS_Code/3367OS_07_Code/ch07/ch07_rec01_understand_log_plot.py | 1 | 1045 | from matplotlib import pyplot as plt
import numpy as np
x = np.linspace(1, 10)
# exponential series and linear series over the same x range
y = [10 ** el for el in x]
z = [2 * el for el in x]
fig = plt.figure(figsize=(10, 8))
# top row: the exponential series on log and linear y-scales
ax1 = fig.add_subplot(2, 2, 1)
ax1.plot(x, y, color='blue')
ax1.set_yscale('log')
ax1.set_title(r'Logarithmic plot of $ {10}^{x} $ ')
ax1.set_ylabel(r'$ {y} = {10}^{x} $')
# NOTE(review): plt.grid's `b` keyword was renamed to `visible` and is
# rejected by recent Matplotlib; this example targets an older release
plt.grid(b=True, which='both', axis='both')
ax2 = fig.add_subplot(2, 2, 2)
ax2.plot(x, y, color='red')
ax2.set_yscale('linear')
ax2.set_title(r'Linear plot of $ {10}^{x} $ ')
ax2.set_ylabel(r'$ {y} = {10}^{x} $')
plt.grid(b=True, which='both', axis='both')
# bottom row: the linear series on log and linear y-scales
ax3 = fig.add_subplot(2, 2, 3)
ax3.plot(x, z, color='green')
ax3.set_yscale('log')
ax3.set_title(r'Logarithmic plot of $ {2}*{x} $ ')
ax3.set_ylabel(r'$ {y} = {2}*{x} $')
plt.grid(b=True, which='both', axis='both')
ax4 = fig.add_subplot(2, 2, 4)
ax4.plot(x, z, color='magenta')
ax4.set_yscale('linear')
ax4.set_title(r'Linear plot of $ {2}*{x} $ ')
ax4.set_ylabel(r'$ {y} = {2}*{x} $')
plt.grid(b=True, which='both', axis='both')
plt.show()
| mit |
treycausey/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
# benchmark grid: sample counts and feature counts, both log-spaced
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
                                    n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
    for j, p in enumerate(n_features):
        # NOTE(review): n and p are floats from logspace; modern NumPy
        # requires integer sizes here — this targets an older NumPy
        X = np.random.normal(size=(n, p))
        # time scikit-learn's Ward, then SciPy's, on the same data
        t0 = time.time()
        ward.fit(X)
        scikits_time[j, i] = time.time() - t0
        t0 = time.time()
        hierarchy.ward(X)
        scipy_time[j, i] = time.time() - t0
# ratio > 1 means scikit-learn is slower than SciPy for that cell
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
# contour at ratio == 1 marks the break-even boundary
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
magnunor/hyperspy | hyperspy/misc/rgb_tools.py | 5 | 2801 | import numpy as np
from dask.array import Array
# Structured dtypes describing packed RGB(A) pixels: one unsigned integer
# field per channel ('u1' = 8-bit, 'u2' = 16-bit).
rgba8 = np.dtype({'names': ['R', 'G', 'B', 'A'],
                  'formats': ['u1', 'u1', 'u1', 'u1']})
rgb8 = np.dtype({'names': ['R', 'G', 'B'],
                 'formats': ['u1', 'u1', 'u1']})
rgba16 = np.dtype({'names': ['R', 'G', 'B', 'A'],
                   'formats': ['u2', 'u2', 'u2', 'u2']})
rgb16 = np.dtype({'names': ['R', 'G', 'B'],
                  'formats': ['u2', 'u2', 'u2']})
# lookup from string name to the structured dtypes above
rgb_dtypes = {
    'rgb8': rgb8,
    'rgb16': rgb16,
    'rgba8': rgba8,
    'rgba16': rgba16}
def is_rgba(array):
    """Return True when *array* uses one of the RGBA structured dtypes
    (rgba8 or rgba16)."""
    # membership test replaces the redundant if/else returning literals
    return array.dtype in (rgba8, rgba16)
def is_rgb(array):
    """Return True when *array* uses one of the RGB structured dtypes
    (rgb8 or rgb16)."""
    # membership test replaces the redundant if/else returning literals
    return array.dtype in (rgb8, rgb16)
def is_rgbx(array):
    """Return True when *array* uses any RGB(A) structured dtype
    (8- or 16-bit)."""
    # direct boolean expression replaces if/else returning literals
    return is_rgb(array) or is_rgba(array)
def rgbx2regular_array(data, plot_friendly=False):
    """Transforms a RGBx array into a standard one

    Parameters
    ----------
    data : numpy array of RGBx dtype
    plot_friendly : bool
        If True change the dtype to float when dtype is not uint8 and
        normalize the array so that it is ready to be plotted by matplotlib.

    Returns
    -------
    The same data viewed as a plain unstructured array with a trailing
    axis of size 3 (RGB) or 4 (RGBA); non-RGBx input is returned unchanged.
    """
    # Make sure that the data is contiguous
    if isinstance(data, Array):
        from dask.diagnostics import ProgressBar
        # an expensive thing, but nothing to be done for now...
        with ProgressBar():
            data = data.compute()
    if data.flags['C_CONTIGUOUS'] is False:
        if np.ma.is_masked(data):
            # copy with C order — presumably chosen over ascontiguousarray
            # to keep the mask intact; confirm
            data = data.copy(order='C')
        else:
            data = np.ascontiguousarray(data)
    if is_rgba(data) is True:
        # reinterpret the 4 structured fields as a trailing axis of length 4;
        # dt is the per-channel dtype taken from the 'B' field
        dt = data.dtype.fields['B'][0]
        data = data.view((dt, 4))
    elif is_rgb(data) is True:
        dt = data.dtype.fields['B'][0]
        data = data.view((dt, 3))
    else:
        return data
    if plot_friendly is True and data.dtype == np.dtype("uint16"):
        # rescale 16-bit data to floats in [0, 1] for matplotlib
        data = data.astype("float")
        data /= 2 ** 16 - 1
    return data
def regular_array2rgbx(data):
    """View a (..., 3) or (..., 4) unsigned-integer array as a structured
    RGB(A) array with one field per channel (inverse of
    rgbx2regular_array).

    Raises
    ------
    ValueError
        If the last axis is not of size 3 or 4, or the dtype is not
        uint8/uint16.
    """
    # Make sure that the data is contiguous
    if data.flags['C_CONTIGUOUS'] is False:
        if np.ma.is_masked(data):
            # copy with C order — presumably to keep the mask; confirm
            data = data.copy(order='C')
        else:
            data = np.ascontiguousarray(data)
    if data.shape[-1] == 3:
        names = rgb8.names
    elif data.shape[-1] == 4:
        names = rgba8.names
    else:
        raise ValueError("The last dimension size of the array must be 3 or 4")
    if data.dtype in (np.dtype("u1"), np.dtype("u2")):
        formats = [data.dtype] * len(names)
    else:
        raise ValueError("The data dtype must be uint16 or uint8")
    # collapse the channel axis into the structured dtype
    return data.view(np.dtype({"names": names,
                               "formats": formats})).reshape(data.shape[:-1])
btabibian/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40  # image size
roi_size = 15
snr = 5.
np.random.seed(0)
# NOTE(review): np.bool was removed in NumPy 1.24+; this example targets
# an older NumPy/scikit-learn vintage
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
# two square regions of interest with opposite-sign weights
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X:  # smooth data
    x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
# additive noise scaled by the requested snr
# NOTE(review): uses exp(snr/20.) rather than 10**(snr/20.) — nonstandard
# dB convention, presumably intentional; confirm
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise  # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2)  # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
                            memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y)  # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
# map the cluster-space coefficients back onto the pixel grid
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression)  # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y)  # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
mikewiebe-ansible/ansible | hacking/cgroup_perf_recap_graph.py | 54 | 4384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import argparse
import csv
from collections import namedtuple
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
raise SystemExit('matplotlib is required for this script to work')
Data = namedtuple('Data', ['axis_name', 'dates', 'names', 'values'])
def task_start_ticks(dates, names):
    """Collapse parallel (dates, names) lists into (date, name) pairs
    marking where a new task begins.

    One tick is emitted per run of consecutive identical names, tagged with
    the date at which that run starts.
    """
    ticks = []
    previous = None
    for idx, name in enumerate(names):
        if name != previous:
            ticks.append((dates[idx], name))
            previous = name
    return ticks
def create_axis_data(filename, relative=False):
    """Parse one cgroup_perf_recap CSV into a Data tuple.

    Columns used: row[0] epoch timestamp, row[1] task name, row[3] value.
    With relative=True the first timestamp becomes t=0; otherwise raw epoch
    times are kept. The axis name is the file's base name without extension.
    """
    base = os.path.basename(filename)
    axis_name = os.path.splitext(base)[0]
    dates, names, values = [], [], []
    x_base = None if relative else 0
    with open(filename) as handle:
        for row in csv.reader(handle):
            stamp = float(row[0])
            if x_base is None:
                x_base = stamp
            dates.append(mdates.epoch2num(stamp - x_base))
            names.append(row[1])
            values.append(float(row[3]))
    return Data(axis_name, dates, names, values)
def create_graph(data1, data2, width=11.0, height=8.0, filename='out.png', title=None):
    """Render two Data series on a shared time axis and save as a PNG.

    data1 is drawn in blue on the left y-axis, data2 in green on a twin
    right y-axis; a secondary x-axis below lists the task names at the
    times each task starts.
    """
    fig, ax1 = plt.subplots(figsize=(width, height), dpi=300)
    task_ticks = task_start_ticks(data1.dates, data1.names)
    ax1.grid(linestyle='dashed', color='lightgray')
    # time-of-day labels ('%X' = locale time format)
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%X'))
    ax1.plot(data1.dates, data1.values, 'b-')
    if title:
        ax1.set_title(title)
    ax1.set_xlabel('Time')
    ax1.set_ylabel(data1.axis_name, color='b')
    for item in ax1.get_xticklabels():
        item.set_rotation(60)
    # secondary bottom axis: task-name ticks at task boundaries
    ax2 = ax1.twiny()
    ax2.set_xticks([x[0] for x in task_ticks])
    ax2.set_xticklabels([x[1] for x in task_ticks])
    ax2.grid(axis='x', linestyle='dashed', color='lightgray')
    ax2.xaxis.set_ticks_position('bottom')
    ax2.xaxis.set_label_position('bottom')
    # push the task axis below the rotated time labels
    ax2.spines['bottom'].set_position(('outward', 86))
    ax2.set_xlabel('Task')
    ax2.set_xlim(ax1.get_xlim())
    for item in ax2.get_xticklabels():
        item.set_rotation(60)
    # second metric on a twin y-axis over the same time range
    ax3 = ax1.twinx()
    ax3.plot(data2.dates, data2.values, 'g-')
    ax3.set_ylabel(data2.axis_name, color='g')
    fig.tight_layout()
    fig.savefig(filename, format='png')
def parse_args():
    """Parse the command line: two positional CSV files plus figure options."""
    parser = argparse.ArgumentParser()
    parser.add_argument('files', nargs=2, help='2 CSV files produced by cgroup_perf_recap to graph together')
    parser.add_argument('--relative', default=False, action='store_true',
                        help='Use relative dates instead of absolute')
    # BUG FIX: the help template used "%s(default)s"; argparse %-formats the
    # help string against its parameter dict, so "%s" interpolated the whole
    # dict instead of the default value. The correct placeholder is
    # "%(default)s" (matching the other options below).
    parser.add_argument('--output', default='out.png', help='output path of PNG file: Default %(default)s')
    parser.add_argument('--width', type=float, default=11.0,
                        help='Width of output image in inches. Default %(default)s')
    parser.add_argument('--height', type=float, default=8.0,
                        help='Height of output image in inches. Default %(default)s')
    parser.add_argument('--title', help='Title for graph')
    return parser.parse_args()
def main():
    """CLI entry point: load both CSVs, render the graph, report the path."""
    args = parse_args()
    left_axis = create_axis_data(args.files[0], relative=args.relative)
    right_axis = create_axis_data(args.files[1], relative=args.relative)
    create_graph(left_axis, right_axis, width=args.width, height=args.height,
                 filename=args.output, title=args.title)
    print('Graph written to %s' % os.path.abspath(args.output))


if __name__ == '__main__':
    main()
| gpl-3.0 |
ywcui1990/nupic.research | projects/wavelet_dataAggregation/runDataAggregationExperiment.py | 11 | 21206 | from os.path import isfile, join, exists
import pandas as pd
import numpy as np
from scipy import signal
import numpy.matlib
import csv
import os
import time
# Pin the process timezone to GMT so epoch <-> datetime conversions are
# reproducible regardless of the host timezone
os.environ['TZ'] = 'GMT'
time.tzset()
# toggle all plotting in this script; matplotlib is only imported when True
display = True
if display:
    import matplotlib.pyplot as plt
    plt.close('all')
    plt.ion()
def plotWaveletPower(sig, cwtmatr, time_scale, x_range=None, title=''):
    """
    Display wavelet transformations along with the original data

    :param sig: original signal
    :param cwtmatr: cwt coefficients, shape (len(time_scale), len(sig))
    :param time_scale: time scales of wavelets (seconds)
    :param x_range: x range of the plot (sample indices); defaults to all
    :param title: title of the plot
    """
    if x_range is None:
        x_range = range(0, cwtmatr.shape[1])
    fig, ax = plt.subplots(nrows=2, ncols=1)
    # human-readable tick labels and their positions in seconds
    y_time_scale_tick = ['1-sec', '1mins', '5mins', '30mins', '60mins', '2hrs', '4hrs', '12hrs', '1day', '1week']
    y_time_scale = [1, 60, 300, 1800, 3600, 7200, 14400, 43200, 86400, 604800]
    # map each named scale onto a fractional row index of cwtmatr
    # (rows are log-spaced in time scale)
    y_tick = (np.log10(y_time_scale) - np.log10(time_scale[0]) ) / \
             (np.log10(time_scale[-1]) - np.log10(time_scale[0])) * (len(time_scale)-1)
    # keep only ticks that fall inside the plotted scale range
    good_tick = np.where(np.logical_and(y_tick >= 0, y_tick < len(time_scale)))[0]
    y_tick = y_tick[good_tick]
    y_time_scale_tick = [y_time_scale_tick[i] for i in good_tick]
    ax[0].imshow(np.abs(cwtmatr[:, x_range]), aspect='auto')
    ax[0].set_yticks(y_tick)
    ax[0].set_yticklabels(y_time_scale_tick)
    ax[0].set_xlabel(' Time ')
    ax[0].set_title(title)
    # raw signal below the power map, on the same x range
    ax[1].plot(sig[x_range])
    ax[1].set_xlabel(' Time ')
    ax[1].autoscale(tight=True)
    plt.show()
def calculate_cwt(sampling_interval, sig, figDir='./', fileName='./', display=True):
    """
    Calculate continuous wavelet transformation (CWT)
    Return variance of the cwt coefficients overtime and its cumulative
    distribution

    :param sampling_interval: sampling interval of the time series (seconds)
    :param sig: value of the time series
    :param figDir: directory of cwt plots
    :param fileName: name of the dataset, used for determining figDir
    :param display: whether to create the cwt plot
    """
    t = np.array(range(len(sig)))*sampling_interval
    # 50 log-spaced wavelet widths, from 1 sample up to 1/20 of the series
    widths = np.logspace(0, np.log10(len(sig)/20), 50)
    T = int(widths[-1])
    # continulus wavelet transformation with ricker wavelet
    cwtmatr = signal.cwt(sig, signal.ricker, widths)
    # trim 4*T samples at both ends to suppress boundary artefacts
    cwtmatr = cwtmatr[:, 4*T:-4*T]
    sig = sig[4*T:-4*T]
    t = t[4*T:-4*T]
    freq = 1/widths.astype('float') / sampling_interval / 4
    time_scale = widths * sampling_interval * 4
    # variance of wavelet power
    cwt_var = np.var(np.abs(cwtmatr), axis=1)
    cwt_var = cwt_var/np.sum(cwt_var)
    cum_cwt_var = np.cumsum(cwt_var)
    (useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)
    if not exists(figDir):
        os.makedirs(figDir)
    if display:
        # plot wavelet coefficients along with the raw signal
        plt.close('all')
        plotWaveletPower(sig, cwtmatr, time_scale)
        plt.savefig(join(figDir, fileName + 'wavelet_transform.pdf'))
        fig, axs = plt.subplots(nrows=2, ncols=1)
        ax = axs[0]
        ax.plot(time_scale, cwt_var, '-o')
        # reference lines at one day and one week
        ax.axvline(x=86400, color='c')
        ax.axvline(x=604800, color='c')
        # mark local maxima (r), strong maxima (k) and local minima (b)
        for _ in xrange(len(local_max)):
            ax.axvline(x=time_scale[local_max[_]], color='r')
        for _ in xrange(len(strong_local_max)):
            ax.axvline(x=time_scale[strong_local_max[_]], color='k')
        for _ in xrange(len(local_min)):
            ax.axvline(x=time_scale[local_min[_]], color='b')
        ax.set_xscale('log')
        ax.set_xlabel(' Time Scale (sec) ')
        ax.set_ylabel(' Variance of Power')
        ax.autoscale(tight='True')
        ax.set_title(fileName)
        ax = axs[1]
        ax.plot(time_scale, cum_cwt_var, '-o')
        ax.set_xscale('log')
        ax.set_xlabel(' Time Scale (sec) ')
        ax.set_ylabel(' Accumulated Variance of Power')
        ax.autoscale(tight='True')
        plt.title(['useTimeOfDay: '+str(useTimeOfDay)+' useDayOfWeek: '+str(useDayOfWeek)])
        plt.savefig(join(figDir, fileName + 'aggregation_time_scale.pdf'))
    return cum_cwt_var, cwt_var, time_scale
def get_local_maxima(cwt_var, time_scale):
    """
    Find local maxima from the wavelet coefficient variance spectrum

    A strong maxima is defined as
    (1) At least 10% higher than the nearest local minima
    (2) Above the baseline value

    :param cwt_var: normalized variance of wavelet power per time scale
    :param time_scale: time scales (seconds) matching cwt_var
    :return: (useTimeOfDay, useDayOfWeek, local_min, local_max,
              strong_local_max) where the two flags indicate whether a
              daily / weekly periodicity is present
    """
    # peak & valley detection via sign changes of the first difference
    local_min = (np.diff(np.sign(np.diff(cwt_var))) > 0).nonzero()[0] + 1
    local_max = (np.diff(np.sign(np.diff(cwt_var))) < 0).nonzero()[0] + 1
    baseline_value = 1.0/len(cwt_var)
    dayPeriod = 86400.0
    weekPeriod = 604800.0
    cwt_var_at_dayPeriod = np.interp(dayPeriod, time_scale, cwt_var)
    cwt_var_at_weekPeriod = np.interp(weekPeriod, time_scale, cwt_var)
    useTimeOfDay = False
    useDayOfWeek = False
    strong_local_max = []
    # range() instead of py2-only xrange(); identical iteration in py2/py3
    for i in range(len(local_max)):
        # nearest local minimum on each side (fall back to the series ends)
        left_local_min = np.where(np.less(local_min, local_max[i]))[0]
        if len(left_local_min) == 0:
            left_local_min = 0
            left_local_min_value = cwt_var[0]
        else:
            left_local_min = local_min[left_local_min[-1]]
            left_local_min_value = cwt_var[left_local_min]
        right_local_min = np.where(np.greater(local_min, local_max[i]))[0]
        if len(right_local_min) == 0:
            right_local_min = len(cwt_var)-1
            right_local_min_value = cwt_var[-1]
        else:
            right_local_min = local_min[right_local_min[0]]
            right_local_min_value = cwt_var[right_local_min]
        local_max_value = cwt_var[local_max[i]]
        # BUG FIX: the original called np.max(left, right), which passes the
        # second value as the `axis` argument instead of comparing the two
        # minima; use the builtin max of the two adjacent minimum values.
        nearest_local_min_value = max(left_local_min_value,
                                      right_local_min_value)
        if ((local_max_value - nearest_local_min_value)/nearest_local_min_value > 0.1 and
                local_max_value > baseline_value):
            strong_local_max.append(local_max[i])
            # flag daily / weekly periodicity when the corresponding period
            # falls between the flanking minima with sufficient power
            if (time_scale[left_local_min] < dayPeriod and
                    dayPeriod < time_scale[right_local_min] and
                    cwt_var_at_dayPeriod > local_max_value/2.0):
                # if np.abs(dayPeriod - time_scale[local_max[i]])/dayPeriod < 0.5:
                useTimeOfDay = True
            if (time_scale[left_local_min] < weekPeriod and
                    weekPeriod < time_scale[right_local_min] and
                    cwt_var_at_weekPeriod > local_max_value/2.0):
                # if np.abs(weekPeriod - time_scale[local_max[i]])/weekPeriod < 0.5:
                useDayOfWeek = True
    return useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max
def get_suggested_timescale_and_encoder(timestamp, sig, thresh=0.2):
    """Suggest an aggregation window and datetime-encoder flags for a series.

    Resamples the series onto a uniform grid, runs the wavelet analysis,
    and returns (new_sampling_interval, useTimeOfDay, useDayOfWeek), where
    the interval is an offset string such as '300S'.
    """
    sampling_interval = np.median(np.diff(timestamp))
    interval_sec = sampling_interval.astype('float32')

    # homogeneous sampling grid before the wavelet transform
    timestamp, sig = resample_data(timestamp, sig, sampling_interval,
                                   display=True)
    cum_cwt_var, cwt_var, time_scale = calculate_cwt(interval_sec, sig)
    useTimeOfDay, useDayOfWeek = get_local_maxima(cwt_var, time_scale)[:2]

    # smallest time scale accounting for `thresh` of the cumulative power,
    # divided by 10, but never finer than 4 raw sampling intervals
    cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
    aggregation_time_scale = cutoff_time_scale / 10.0
    if aggregation_time_scale < interval_sec * 4:
        aggregation_time_scale = interval_sec * 4
    new_sampling_interval = str(int(aggregation_time_scale / 4)) + 'S'
    return (new_sampling_interval, useTimeOfDay, useDayOfWeek)
def readCSVfiles(fileName):
    """
    Read csv data file, the data file must have two columns
    with header "timestamp", and "value"

    :param fileName: path of the csv file
    :return: (timestamps, values) as numpy datetime64 / float32 arrays
    """
    timestamps = []
    values = []
    # use a context manager so the file handle is always closed (the
    # original passed open() straight into csv.reader and leaked it), and
    # the builtin next() instead of the py2-only .next() method
    with open(fileName, 'r') as csvFile:
        fileReader = csv.reader(csvFile)
        next(fileReader)  # skip header line
        for row in fileReader:
            timestamps.append(row[0])
            values.append(row[1])
    timestamps = np.array(timestamps, dtype='datetime64')
    values = np.array(values, dtype='float32')
    return (timestamps, values)
def writeCSVfiles(fileName, timestamp, value):
    """
    write data to csv file,
    the data file will have two columns with header "timestamp", and "value"

    :param fileName: output path
    :param timestamp: numpy datetime64 array (second resolution)
    :param value: sequence of values, one per timestamp
    """
    # use a context manager so the file is flushed and closed
    # deterministically (the original never closed the handle, which can
    # lose buffered rows); range() works identically in py2/py3
    with open(fileName, 'w') as csvFile:
        fileWriter = csv.writer(csvFile)
        fileWriter.writerow(['timestamp', 'value'])
        for i in range(len(timestamp)):
            fileWriter.writerow([timestamp[i].astype('O').strftime("%Y-%m-%d %H:%M:%S"),
                                 value[i]])
def resample_data(timestamp, sig, new_sampling_interval, display=False):
    """
    Resample time series data at new sampling interval using linear interpolation
    Note: the resampling function is using interpolation, it may not be
    appropriate for aggregation purpose

    :param timestamp: timestamp in numpy datetime64 type
    :param sig: value of the time series
    :param new_sampling_interval: new sampling interval (numpy timedelta64)
    :param display: plot original vs resampled series for inspection
    :return: (timestamp_new, sig_new)
    """
    nSampleNew = np.floor((timestamp[-1] - timestamp[0]) /
                          new_sampling_interval).astype('int') + 1
    # vectorized grid construction replaces the per-sample python loop:
    # timestamp_new[k] = timestamp[0] + k * interval, stored at second
    # resolution exactly as the original pre-allocated datetime64[s] array
    timestamp_new = (timestamp[0] +
                     np.arange(nSampleNew) * new_sampling_interval
                     ).astype('datetime64[s]')
    sig_new = np.interp((timestamp_new - timestamp[0]).astype('float32'),
                        (timestamp - timestamp[0]).astype('float32'), sig)

    if display:
        plt.figure(3)
        plt.plot(timestamp, sig)
        plt.plot(timestamp_new, sig_new)
        plt.legend(['before resampling', 'after resampling'])

    return (timestamp_new, sig_new)
def aggregate_data(thresh_list, dataFile, aggregatedDataPath, waveletDir='./wavelet/', display=False, verbose=0):
    """
    Aggregate individual dataset, the aggregated data will be saved at aggregatedDataFile

    :param thresh_list: list of aggregation thresholds, each in [0, 1)
    :param dataFile: path of the original datafile
    :param aggregatedDataPath: root path for the aggregated datafiles
    :param waveletDir: path of wavelet transformations (for visual inspection)
    :param display: forwarded to calculate_cwt for plotting
    :param verbose: if > 0, print before/after lengths
    """
    data_file_dir = dataFile.split('/')
    (timestamp, sig) = readCSVfiles(dataFile)
    # dt = (timestamp[len(sig)-1] - timestamp[0])/(len(sig)-1)
    # median sampling interval is robust to occasional gaps in the data
    dt = np.median(np.diff(timestamp))
    dt_sec = dt.astype('float32')
    # resample the data with homogeneous sampling intervals
    (timestamp, sig) = resample_data(timestamp, sig, dt, display=True)
    (cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig,
                                                       display=display,
                                                       figDir=join(waveletDir, data_file_dir[-2]),
                                                       fileName=data_file_dir[-1])
    for thresh in thresh_list:
        # one output directory per aggregation threshold
        new_data_dir = join(aggregatedDataPath, 'thresh='+str(thresh), data_file_dir[-2])
        if not exists(new_data_dir):
            os.makedirs(new_data_dir)
        new_data_file = join(new_data_dir, data_file_dir[-1])
        # determine aggregation time scale: smallest scale that accounts
        # for `thresh` of the cumulative wavelet power, divided by 10,
        # but never finer than 4 raw sampling intervals
        cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
        aggregation_time_scale = cutoff_time_scale/10.0
        if aggregation_time_scale < dt_sec*4:
            aggregation_time_scale = dt_sec*4
        new_sampling_interval = np.timedelta64(int(aggregation_time_scale/4 * 1000), 'ms')
        nSampleNew = np.floor((timestamp[-1] - timestamp[0])/new_sampling_interval).astype('int') + 1
        timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]')
        value_new = np.empty(nSampleNew, dtype='float32')
        left_sampleI = 0
        new_sampleI = 0
        # aggregation by averaging: each new sample is the mean of all raw
        # samples since the previous aggregated sample
        for sampleI in xrange(len(sig)):
            if timestamp[sampleI] >= timestamp[0] + new_sampleI * new_sampling_interval:
                timestamp_new[new_sampleI] = timestamp[0] + new_sampleI * new_sampling_interval
                value_new[new_sampleI] = (np.mean(sig[left_sampleI:sampleI+1]))
                left_sampleI = sampleI+1
                new_sampleI += 1
        writeCSVfiles(new_data_file, timestamp_new, value_new)
        if verbose > 0:
            print " original length: ", len(sig), "\t file: ", dataFile
            print "\t\tthreshold: ", thresh, "\t new length: ", len(value_new)
def aggregate_nab_data(thresh_list, dataPath='data/',
                       aggregatedDataPath='data_aggregate/',
                       waveletDir='wavelet/',
                       verbose=0):
    """
    Aggregate all NAB data using the wavelet transformation based algorithm

    :param thresh_list: list of aggregation thresholds, each a number in [0, 1)
    :param dataPath: path of the original NAB data
    :param aggregatedDataPath: path of the aggregated NAB data
    :param waveletDir: path of wavelet transformations (for visual inspection)
    :param verbose: verbosity level, forwarded to aggregate_data
    """
    if not exists(aggregatedDataPath):
        os.makedirs(aggregatedDataPath)
    # one sub-directory of dataPath per NAB data category
    data_dirs = [join(dataPath, f) for f in os.listdir(dataPath)
                 if not isfile(join(dataPath, f))]
    for data_dir in data_dirs:  # renamed from `dir` to avoid shadowing the builtin
        data_files = [join(data_dir, f) for f in os.listdir(data_dir)
                      if isfile(join(data_dir, f))]
        for data_file in data_files:
            aggregate_data(thresh_list, data_file,
                           aggregatedDataPath, waveletDir, verbose=verbose)
def get_pre_aggregated_anomaly_score(data_path, result_folder, result_folder_pre_aggregate):
    """
    This function transforms anomaly scores on the aggregated data file (in result_folder)
    to the original sampling rate of the data (in data_path) before aggregation. The new anomaly
    score will be saved to result_folder_pre_aggregate

    :param data_path: directory holding the original (pre-aggregation) CSV files
    :param result_folder: directory holding detector results on the aggregated data
    :param result_folder_pre_aggregate: output directory for the re-mapped results
    """
    # each sub-directory of result_folder holds the results for one data category
    dataDirs = [join(result_folder, f) for f in os.listdir(result_folder) if not isfile(join(result_folder, f))]
    for dir in dataDirs:
        resultfiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
        for i in range(len(resultfiles)):
            result_file_dir = resultfiles[i].split('/')
            # [8:] strips a fixed-length detector prefix from the result file
            # name (presumably "numenta_", 8 chars -- confirm against the NAB
            # result naming convention) to recover the original data file name
            original_data_file = join(data_path, result_file_dir[-2], result_file_dir[-1][8:])
            dat = pd.read_csv(original_data_file, header=0, names=['timestamp', 'value'])
            result = pd.read_csv(resultfiles[i], header=0,
                                 names=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
            time_stamp_pre_aggregation = pd.to_datetime(dat.timestamp)
            time_stamp_after_aggregation = pd.to_datetime(result.timestamp)
            # binary (0/1) anomaly flags on the original and aggregated time grids
            binary_anomaly_score_pre_aggregation = np.zeros(shape=(len(dat),))
            binary_anomaly_score_after_aggregation = np.zeros(shape=(len(result),))
            for j in range(len(result)):
                # threshold the detector score at 0.5 and map every flagged
                # aggregated sample onto the nearest original timestamp
                if result.anomaly_score[j] > .5:
                    binary_anomaly_score_after_aggregation[j] = 1
                    idx_original = np.argmin(abs(time_stamp_pre_aggregation - time_stamp_after_aggregation[j]))
                    binary_anomaly_score_pre_aggregation[idx_original] = 1
            value_pre_aggregation = dat.value.values
            # raw scores and labels are not re-mapped; they are written out as zeros
            # (an interpolation alternative is kept below, commented out)
            raw_score_pre_aggregation = np.zeros(shape=(len(dat),))
            label_pre_aggregation = np.zeros(shape=(len(dat),))
            # raw_score_pre_aggregation = np.interp(time_stamp_original, time_stamp_after_aggregation, result.raw_score.values)
            result_pre_aggregate = pd.DataFrame(np.transpose(np.array([time_stamp_pre_aggregation,
                                                                       value_pre_aggregation,
                                                                       binary_anomaly_score_pre_aggregation,
                                                                       raw_score_pre_aggregation,
                                                                       label_pre_aggregation])),
                                                columns=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
            result_file_dir_pre_aggregate = join(result_folder_pre_aggregate, result_file_dir[-2])
            if not exists(result_file_dir_pre_aggregate):
                os.makedirs(result_file_dir_pre_aggregate)
            result_file_pre_aggregate = join(result_file_dir_pre_aggregate, result_file_dir[-1])
            result_pre_aggregate.to_csv(result_file_pre_aggregate, index=False)
            print " write pre-aggregated file to ", result_file_pre_aggregate
            # compare anomaly scores before and after aggregations for individual files
            # plt.figure(2)
            # plt.plot(time_stamp_after_aggregation, binary_anomaly_score_after_aggregation)
            # plt.plot(time_stamp_pre_aggregation, binary_anomaly_score_pre_aggregation)
def runTimeVsDataLength(dataPath):
"""
Plot Data Aggregation Algorithm Runtime vs length of the data
"""
dataDirs = [join(dataPath, f) for f in os.listdir(dataPath) if not isfile(join(dataPath, f))]
thresh = 0.2
dataLength = []
runTime = []
for dir in dataDirs:
datafiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(datafiles)):
(timestamp, sig) = readCSVfiles(datafiles[i])
dataLength.append(len(sig))
start_time = time.time()
aggregate_data([thresh], datafiles[i], aggregatedDataPath='data_aggregate/', display=False)
end_time = time.time()
print " length: ", len(sig), " file: ", datafiles[i], " Time: ", (end_time - start_time)
runTime.append(end_time - start_time)
plt.figure()
plt.plot(dataLength, runTime, '*')
plt.xlabel(' Dataset Size (# Record)')
plt.ylabel(' Runtime (seconds) ')
plt.savefig('RuntimeVsDatasetSize.pdf')
return (dataLength, runTime)
if __name__ == "__main__":
NABPath = '/Users/ycui/nta/NAB/'
currentPath = os.getcwd()
thresh_list = [0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2,
0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.40]
# step 1: aggregate NAB data with different threshold
print " aggregating NAB data ..."
aggregate_nab_data(thresh_list, dataPath=NABPath+'data/', verbose=2)
# step 2: run HTM on aggregated NAB data
for thresh in thresh_list:
resultsAggregatePath = currentPath + "/results_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(resultsAggregatePath):
os.os.makedirs(resultsAggregatePath)
print " run HTM on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --detect --dataDir " + currentPath + "/data_aggregate/thresh=" + str(thresh) + \
"/ --resultsDir "+ currentPath + "/results_aggregate/thresh=" + str(thresh) + " --skipConfirmation")
# step 3: get pre-aggregated anomaly score
for thresh in thresh_list:
preresultAggregatePath = currentPath + "/results_pre_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(preresultAggregatePath):
os.os.makedirs(preresultAggregatePath)
get_pre_aggregated_anomaly_score(data_path=NABPath+'data/',
result_folder='results_aggregate/thresh=' + str(thresh) + '/numenta',
result_folder_pre_aggregate='results_pre_aggregate/thresh=' + str(thresh) + '/numenta')
# step 4: run NAB scoring
for thresh in thresh_list:
print " run scoring on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --score --skipConfirmation " +
"--thresholdsFile " + NABPath + "config/thresholds.json " +
"--resultsDir " + currentPath + "/results_pre_aggregate/thresh="+str(thresh)+"/")
# step 5: read & compare scores
standard_score = []
data_length_all = []
for thresh in thresh_list:
scorefile = "./results_pre_aggregate/thresh=" + str(thresh) + "/numenta/numenta_standard_scores.csv"
scoredf = pd.read_csv(scorefile, header=0)
scoredf = scoredf.sort('File')
scoredf.index = range(len(scoredf))
standard_score.append(scoredf.Score.values[:-1])
data_length = []
for i in xrange(len(scoredf.File)-1):
datafile = './data_aggregate/thresh=' + str(thresh) + '/' + scoredf.File[i]
dat = pd.read_csv(datafile, header=0, names=['timestamp', 'value'])
data_length.append(len(dat))
data_length_all.append(data_length)
data_length_all = np.array(data_length_all)
standard_score = np.array(standard_score)
short_dat = np.where(data_length_all[0, :] < 1000)[0]
long_dat = np.where(data_length_all[0, :] > 1000)[0]
use_dat = np.array(range(data_length_all.shape[1]))
use_dat = long_dat
# plt.imshow(data_length_all, interpolation='nearest', aspect='auto')
# plot anomaly score vs aggregation threshold
anomaly_score_diff = standard_score[:, long_dat] - numpy.matlib.repmat(standard_score[0, long_dat], len(thresh_list), 1)
shortFileName = []
for i in range(len(scoredf.File.values[:-1])):
file = scoredf.File.values[i]
fileName = file.split('/')[-1]
fileName = fileName[:-4]
shortFileName.append(fileName)
fig=plt.figure()
plt.imshow(anomaly_score_diff, interpolation='nearest', aspect='auto')
ytickLoc = range(len(thresh_list))
plt.yticks(ytickLoc, thresh_list)
plt.xticks(range(len(scoredf.File)-1), shortFileName, rotation='vertical')
plt.subplots_adjust(bottom=0.6)
plt.ylabel(' Threshold')
plt.title(' Anomaly Score Relative to BaseLine')
plt.colorbar()
plt.clim(-2, 2)
plt.savefig('AnomalyScore_Vs_AggregationThreshold_EachFile.pdf')
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(np.array(thresh_list)*100, np.median(standard_score[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(standard_score[:, use_dat], 1), '-o')
plt.legend(['Median', 'Mean'])
plt.xlabel(' Threshold (%)')
plt.ylabel(' Median Anomaly Score ')
plt.subplot(2, 1, 2)
plt.plot(np.array(thresh_list)*100, np.median(data_length_all[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(data_length_all[:, use_dat], 1), '-o')
plt.xlabel(' Threshold (%)')
plt.ylabel(' Data Length ')
plt.legend(['Median', 'Mean'])
plt.savefig('AnomalyScore_Vs_AggregationThreshold.pdf')
num_better_anomaly_score = []
for i in xrange(len(thresh_list)-1):
num_better_anomaly_score.append(len(np.where(standard_score[i+1, :] > standard_score[0, :])[0]))
(dataLength, runTime) = runTimeVsDataLength(dataPath=NABPath+'data/')
| agpl-3.0 |
jbedorf/tensorflow | tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/captured_data_to_wav.py | 11 | 1442 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts values pulled from the microcontroller into audio files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
# import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
def new_data_to_array(fn):
  """Decode a captured hex dump into a tuple of signed 16-bit samples.

  Args:
    fn: path to a text file whose first line is a header (skipped) and whose
      remaining lines hold whitespace-separated hexadecimal byte values.

  Returns:
    A tuple of ints, each pair of bytes unpacked as a little-endian int16.
  """
  vals = []
  with open(fn) as f:
    for n, line in enumerate(f):
      if n != 0:  # skip the header line
        vals.extend([int(v, 16) for v in line.split()])
  # bug fix: ''.join(map(chr, vals)) only builds a byte string on Python 2;
  # bytes(bytearray(vals)) yields the same bytes on both Python 2 and 3,
  # matching this file's __future__ imports.
  b = bytes(bytearray(vals))
  y = struct.unpack('<' + 'h' * (len(b) // 2), b)
  return y
# Script entry: decode the captured hex dump and write it out as a 16 kHz WAV
# file for listening/inspection.
data = 'captured_data.txt'
values = np.array(new_data_to_array(data)).astype(float)
# optional visual check of the waveform, disabled by default
# plt.plot(values, 'o-')
# plt.show(block=False)
# normalize to full scale [-1, 1] by the peak absolute sample value
wav = values / np.max(np.abs(values))
sf.write('captured_data.wav', wav, 16000)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.