repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
alvarofierroclavero/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)

# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD

import numpy as np
import matplotlib.pyplot as plt

from scipy import stats

from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report

digits = datasets.load_digits()
rng = np.random.RandomState(0)  # fixed seed for reproducibility
indices = np.arange(len(digits.data))
rng.shuffle(indices)

# Keep only the first 330 shuffled digits so the demo runs quickly.
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]

n_total_samples = len(y)
n_labeled_points = 30  # only this many points keep their true labels

indices = np.arange(n_total_samples)

unlabeled_set = indices[n_labeled_points:]

# shuffle everything around
# Mark unlabeled points with -1, the convention LabelSpreading expects.
y_train = np.copy(y)
y_train[unlabeled_set] = -1

###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]

# Evaluate only on the points the model never saw labels for.
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)

print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
      (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))

print(classification_report(true_labels, predicted_labels))

print("Confusion matrix")
print(cm)

# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)

# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]

###############################################################################
# plot the 10 digits the model is least sure about
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
    image = images[image_index]

    sub = f.add_subplot(2, 5, index + 1)
    sub.imshow(image, cmap=plt.cm.gray_r)
    plt.xticks([])
    plt.yticks([])
    sub.set_title('predict: %i\ntrue: %i' % (
        lp_model.transduction_[image_index], y[image_index]))

f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)

# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.decomposition import PCA, FastICA

###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
# Heavy-tailed Student-t (df=1.5) sources: strongly non-Gaussian, which is
# exactly what ICA exploits to separate them.
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.

# Mix data
A = np.array([[1, 1], [0, 2]])  # Mixing matrix

X = np.dot(S, A.T)  # Generate observations

pca = PCA()
S_pca_ = pca.fit(X).transform(X)

ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X)  # Estimate the sources

# Rescale the recovered sources to unit variance for plotting.
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter the 2-D samples in ``S`` on the current axes.

    If ``axis_list`` is given, each axis pair is rescaled to unit std
    (in place) and drawn as orange/red direction vectors through the
    origin.
    """
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
                color='steelblue', alpha=0.5)
    if axis_list is not None:
        for axis, color in zip(axis_list, ['orange', 'red']):
            axis /= axis.std()
            x_axis, y_axis = axis
            # Trick to get legend to work
            plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
            plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
                       color=color)

    plt.hlines(0, -3, 3)
    plt.vlines(0, -3, 3)
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.xlabel('x')
    plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')

# The PCA components and the ICA mixing matrix supply the direction
# vectors overlaid on the raw observations.
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')

plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')

plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')

# left, bottom, right, top, wspace, hspace
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
pfnet/maf_example | caffe/util/classify.py | 1 | 1329 | import maflib.util
import numpy as np
import matplotlib.pyplot as plt
import caffe
import cPickle
def vis_square(data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
return data
def write(data, out_file):
    """Render ``data`` with imshow and save the plot to ``out_file``.

    The figure is closed after saving so repeated calls do not leak
    matplotlib figure objects (the original left every figure open).
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(data)
    plt.savefig(out_file)
    plt.close(fig)
@maflib.util.rule
def classify(task):
    # maf build rule (Python 2): load a trained Caffe model, run it on the
    # first 20 images from a pickled file, then save a mosaic of the learned
    # conv1 filters to the task's output path.
    model_file = task.inputs[0].abspath()
    # Checkpoint name follows Caffe's "<prefix>_iter_<N>.caffemodel" scheme.
    pretrained_file = '%s_iter_%d.caffemodel' % (task.inputs[1].abspath(), task.parameter['max_iter'])
    net = caffe.Classifier(model_file, pretrained_file)
    net.set_phase_test()
    net.set_mode_cpu()
    with open(task.parameter['image_file'], 'rb') as f:
        # assumes the pickle holds a dict whose 'data' entry is an array of
        # flattened 32x32x3 images (CIFAR-style) -- TODO confirm upstream
        data = map(lambda x: x.reshape((32, 32, 3)), cPickle.load(f)['data'][:20])
    net.predict(data)
    # conv1 weights: (n_filters, channels, h, w) -> (n_filters, h, w, channels)
    d = net.params['conv1'][0].data.transpose(0, 2, 3, 1)
    dd = vis_square(d)
    write(dd, task.outputs[0].abspath())
| bsd-3-clause |
jergosh/slr_pipeline | bin/find_neighbourhood.py | 1 | 1938 | import re
import sys
import glob
import csv
from os import path
from argparse import ArgumentParser
from subprocess import Popen
import operator
from Bio import PDB
import numpy as np
import pandas
def rid2str(r):
    """Format a Biopython residue id tuple (hetflag, seqnum, icode) as one string."""
    hetflag, seqnum, icode = r.id
    return "%s%s%s" % (hetflag, seqnum, icode)
# Command-line interface: a table mapping proteins to PDB positions, the
# directory holding PDB files, the output path and a distance threshold.
argparser = ArgumentParser()

argparser.add_argument("--pdbmap", metavar="pdb_map", type=str, required=True)
argparser.add_argument("--pdbdir", metavar="pdb_dir", type=str, required=True)
argparser.add_argument("--outfile", metavar="out_dir", type=str, required=True)
# NOTE(review): default 0.05 looks small for an Angstrom distance cutoff --
# confirm the intended units of --thr.
argparser.add_argument("--thr", metavar="thr", type=float, default=0.05)

args = argparser.parse_args()
def find_neighbourhood(df, thr):
pdb_id = df.pdb_id.iloc[0]
stable_id = df.stable_id.iloc[0]
chain_id = df.pdb_chain.iloc[0]
try:
pdb = p.get_structure(pdb_id, pdbfile)
pdb_chain = pdb[0][chain_id]
except IOError, e:
print >>sys.stderr, "PDB file", pdb_id, "missing!"
return
out_df = pandas.DataFrame(columns=('source', 'pdb_pos'))
out_residues = set()
for i, row in df.iterrows():
res_id = parse_coord(row.pdb_pos)
try:
r = pdb_chain[res_id]
except KeyError, e:
r = find_sequential(pdb_chain, res_id)
if r is None:
raise e
for r2 in pdb_chain:
if r2 in out_residues:
print "Residue already in output_set"
continue
if r - r2 < thr:
out_df.loc[df.shape[0]] = [ rid2str(r), rid2str(r2) ]
out_residues.add(r2)
return out_df
if __name__ == "__main__":
    # Read the residue table, then compute neighbourhoods per
    # (protein, PDB entry, chain) group and write one TSV.
    pdb_map = pandas.read_table(args.pdbmap, dtype={ "stable_id": str, "pdb_id": str, "pdb_pos": str, "omega": np.float64 })
    out_map = pdb_map.groupby(["stable_id", "pdb_id", "pdb_chain"]).apply(find_neighbourhood, args.thr)
    out_map.to_csv(args.outfile, sep="\t", quoting=csv.QUOTE_NONE)
| gpl-2.0 |
cactusbin/nyt | matplotlib/examples/api/legend_demo.py | 3 | 1134 | """
Demo of the legend function with a few features.
In addition to the basic legend, this demo shows a few optional features:
* Custom legend placement.
* A keyword argument to a drop-shadow.
* Setting the background color.
* Setting the font size.
* Setting the line width.
"""
import numpy as np
import matplotlib.pyplot as plt
# Example data
a = np.arange(0, 3, .02)
b = np.arange(0, 3, .02)  # NOTE(review): unused in the plots below
c = np.exp(a)   # increasing exponential
d = c[::-1]     # its mirror image

# Create plots with pre-defined labels.
# Alternatively, you can pass labels explicitly when calling `legend`.
fig, ax = plt.subplots()
ax.plot(a, c, 'k--', label='Model length')
ax.plot(a, d, 'k:', label='Data length')
ax.plot(a, c+d, 'k', label='Total message length')

# Now add the legend with some customizations.
legend = ax.legend(loc='upper center', shadow=True)

# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')

# Set the fontsize
for label in legend.get_texts():
    label.set_fontsize('large')

for label in legend.get_lines():
    label.set_linewidth(1.5)  # the legend line width

plt.show()
| unlicense |
qbilius/streams | streams/metrics/classifiers.py | 1 | 5903 | from collections import OrderedDict
import numpy as np
import scipy.stats
import pandas
import sklearn, sklearn.svm, sklearn.preprocessing, sklearn.linear_model
import streams.utils
class MatchToSampleClassifier(object):
    """A classifier for the Delayed Match-to-Sample task.

    Formulated as a typical sklearn classifier with `fit`,
    `predict_proba` and `score` methods.
    """

    def __init__(self, norm=True, nfeats=None, seed=None, C=1):
        """
        :Kwargs:
            - norm (bool, default: True)
                Whether to zscore features or not.
            - nfeats (int or None, default: None)
                The number of features to use. Useful when you want to match
                the number of features across layers. If None, all features
                are used.
            - seed (int or None, default: None)
                Random seed for feature selection.
            - C (float, default: 1)
                Inverse regularization strength for the underlying logistic
                regression.
        """
        self.norm = norm
        self.nfeats = nfeats
        self.seed = seed
        self.C = C

    def preproc(self, X, reset=False):
        """Optionally z-score X and subsample features.

        With reset=True the scaler / feature selection are (re)fitted;
        with reset=False the previously fitted transforms are applied.
        """
        if self.norm:
            if reset:
                self.scaler = sklearn.preprocessing.StandardScaler().fit(X)
            X = self.scaler.transform(X)
        else:
            self.scaler = None
        if self.nfeats is not None:
            if reset:
                # BUG FIX: the selection used to live in a local `sel`, so any
                # call with reset=False (e.g. from predict_proba) raised
                # NameError; persist it on the instance instead.
                self._sel = np.random.RandomState(self.seed).permutation(X.shape[1])[:self.nfeats]
            X = X[:, self._sel]
        return X

    def fit(self, X, y, order=None):
        """Fit a multinomial logistic regression on (preprocessed) X.

        :Kwargs:
            - order
                Label order. If None, will be sorted alphabetically.
        """
        if order is None:
            order = np.unique(y)
        # Maps label -> integer class index, preserving `order`.
        self.label_dict = OrderedDict([(obj, o) for o, obj in enumerate(order)])
        y = self.labels2inds(y)
        X = self.preproc(X, reset=True)
        self.clf = sklearn.linear_model.LogisticRegression(multi_class='multinomial', solver='newton-cg', C=self.C)
        self.clf.fit(X, y)

    def _acc(self, x, y):
        # Proportion correct given target mass x and distractor mass y.
        return x / (x + y)

    def _dprime(self, x, y):
        # Signal-detection d': z(target rate) - z(distractor rate).
        return scipy.stats.norm.ppf(x) - scipy.stats.norm.ppf(y)

    def predict_proba(self, X, targets=None, distrs=None, kind='2-way', measure='acc'):
        """
        Model classification confidence (range 0-1 for measure='acc').

        :Args:
            X: feature matrix to classify.
        :Kwargs:
            - targets: per-sample correct label(s); a single string is
              broadcast to all samples.
            - distrs: per-sample distractor label(s); if given, confidence is
              computed target-vs-distractor only.
            - kind: '2-way' produces a DataFrame of pairwise confidences
              against every possible distractor; anything else returns the
              raw target probabilities.
            - measure: 'acc' or 'dprime'/"d'".
        """
        if not hasattr(self, 'clf'):
            raise Exception('Must train the classifier first')
        if measure not in ['acc', 'dprime', "d'"]:
            raise ValueError('measure {} not recognized'.format(measure))
        measure_op = self._acc if measure == 'acc' else self._dprime
        X = self.preproc(X)
        conf = self.clf.predict_proba(X)
        if targets is not None:
            if isinstance(targets, str):
                targets = [targets]
            ti = self.labels2inds(targets)
            # probability assigned to each sample's target class
            t = np.array([x[i] for x, i in zip(conf, ti)])
            if distrs is not None:
                if isinstance(distrs, str):
                    distrs = [distrs]
                dinds = self.labels2inds(distrs)
                # distractor probability
                d = np.array([c[di] for c, di in zip(conf, dinds)])
                acc = measure_op(t, d)
            elif kind == '2-way':
                acc = []
                for c, target in zip(conf, targets):
                    ti = self.label_dict[target]
                    c_tmp = []
                    # pairwise measure against every possible distractor;
                    # the target-vs-itself cell is undefined (NaN)
                    for di in self.label_dict.values():
                        if di != ti:
                            c_tmp.append(measure_op(c[ti], c[di]))
                        else:
                            c_tmp.append(np.nan)
                    acc.append(c_tmp)
                acc = pandas.DataFrame(acc, index=targets, columns=list(self.label_dict.keys()))
            else:
                acc = t
        else:
            acc = conf
        return acc

    def labels2inds(self, y):
        """
        Converts class labels (usually strings) to integer indices.
        """
        return np.array([self.label_dict[x] for x in y])

    def score(self, X, y, kind='2-way', measure='dprime', cap=5):
        """
        Classification accuracy.

        Accuracy is either 0 or 1. For a 2-way classifier, this depends on
        `predict_proba` being less or more than .5. For an n-way classifier,
        it checks if argmax of `predict_proba` gives the correct or incorrect
        class.
        """
        if kind == '2-way':
            acc = self.predict_proba(X, targets=y, kind=kind)
            acc[~np.isnan(acc)] = acc[~np.isnan(acc)] > .5
        else:
            conf = self.predict_proba(X, kind=kind)
            y = self.labels2inds(y)
            acc = np.argmax(conf, 1) == y
        if measure == 'dprime':
            # BUG FIX: removed a leftover `import ipdb; ipdb.set_trace()`
            # debugger breakpoint that halted every scoring call here.
            acc = streams.utils.hitrate_to_dprime_o1(acc, cap=cap)
        return acc
class CorrelationClassifier(object):
    """Nearest-template classifier based on Pearson correlation.

    `fit` memorizes the training samples and their labels; `predict`
    labels each test sample with the label of the most-correlated
    training sample (correlations are normalized per test sample so
    the scores sum to one).
    """

    def __init__(self):
        pass

    def fit(self, X, y):
        # Memorize templates and labels; no actual fitting happens.
        self.tokens = X
        self.labels = np.array(y)

    def predict(self, X):
        y = self.tokens
        # BUG FIX: np.row_stack was removed in NumPy 2.0; np.vstack is the
        # long-standing identical function.
        feats = np.vstack([y, X])
        corr = np.corrcoef(feats)
        # Rows index templates, columns index test samples.
        proba = corr[:len(y), len(y):]
        proba /= proba.sum(0)  # normalize each column to sum=1
        proba = proba.T  # (n_samples, n_classes)
        labels = self.labels[proba.argmax(1)]
        return labels
| gpl-3.0 |
phobson/statsmodels | statsmodels/tools/tools.py | 1 | 16566 | '''
Utility functions models code
'''
from statsmodels.compat.python import reduce, lzip, lmap, asstr2, range, long
import numpy as np
import numpy.lib.recfunctions as nprf
import numpy.linalg as L
from scipy.linalg import svdvals
from statsmodels.datasets import webuse
from statsmodels.tools.data import _is_using_pandas, _is_recarray
from statsmodels.compat.numpy import np_matrix_rank
def _make_dictnames(tmp_arr, offset=0):
"""
Helper function to create a dictionary mapping a column number
to the name in tmp_arr.
"""
col_map = {}
for i, col_name in enumerate(tmp_arr):
col_map.update({i+offset : col_name})
return col_map
def drop_missing(Y, X=None, axis=1):
    """
    Returns views on the arrays Y and X where missing observations are
    dropped.

    Parameters
    ----------
    Y : array-like
    X : array-like, optional
    axis : int
        Axis along which to look for missing observations. Default is 1, ie.,
        observations in rows.

    Returns
    -------
    Y : array
        Rows of Y with no missing values in Y (nor in X, when X is given).
    X : array
        Returned only when X is given; filtered with the same row mask as Y.

    Notes
    -----
    If either Y or X is 1d, it is reshaped to be 2d.
    """
    Y = np.asarray(Y)
    if Y.ndim == 1:
        Y = Y[:, None]
    if X is not None:
        # BUG FIX: use asarray (not array) so no needless copy is made,
        # matching both the "views" promise above and the treatment of Y.
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, None]
        # keep rows that are fully observed in BOTH arrays
        keepidx = np.logical_and(~np.isnan(Y).any(axis),
                                 ~np.isnan(X).any(axis))
        return Y[keepidx], X[keepidx]
    else:
        keepidx = ~np.isnan(Y).any(axis)
        return Y[keepidx]
# TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your array you don't
# want to cast it to float
# TODO: add name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False, ):
    '''
    Returns a dummy matrix given an array of categorical variables.

    Parameters
    ----------
    data : array
        A structured array, recarray, or array. This can be either
        a 1d vector of the categorical variable or a 2d array with
        the column specifying the categorical variable specified by the col
        argument.
    col : 'string', int, or None
        If data is a structured array or a recarray, `col` can be a string
        that is the name of the column that contains the variable. For all
        arrays `col` can be an int that is the (zero-based) column index
        number. `col` can only be None for a 1d array. The default is None.
    dictnames : bool, optional
        If True, a dictionary mapping the column number to the categorical
        name is returned. Used to have information about plain arrays.
    drop : bool
        Whether or not keep the categorical variable in the returned matrix.

    Returns
    --------
    dummy_matrix, [dictnames, optional]
        A matrix of dummy (indicator/binary) float variables for the
        categorical data. If dictnames is True, then the dictionary
        is returned as well.

    Notes
    -----
    This returns a dummy variable for EVERY distinct variable. If a
    a structured or recarray is provided, the names for the new variable is the
    old variable name - underscore - category name. So if the a variable
    'vote' had answers as 'yes' or 'no' then the returned array would have to
    new variables-- 'vote_yes' and 'vote_no'. There is currently
    no name checking.

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm

    Univariate examples

    >>> import string
    >>> string_var = [string.lowercase[0:5], string.lowercase[5:10], \
                      string.lowercase[10:15], string.lowercase[15:20], \
                      string.lowercase[20:25]]
    >>> string_var *= 5
    >>> string_var = np.asarray(sorted(string_var))
    >>> design = sm.tools.categorical(string_var, drop=True)

    Or for a numerical categorical variable

    >>> instr = np.floor(np.arange(10,60, step=2)/10)
    >>> design = sm.tools.categorical(instr, drop=True)

    With a structured array

    >>> num = np.random.randn(25,2)
    >>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
                    ('instrument','f4'),('str_instr','a5')])
    >>> struct_ar['var1'] = num[:,0][:,None]
    >>> struct_ar['var2'] = num[:,1][:,None]
    >>> struct_ar['instrument'] = instr[:,None]
    >>> struct_ar['str_instr'] = string_var[:,None]
    >>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)

    Or

    >>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
    '''
    # Only a single column can be converted per call.
    if isinstance(col, (list, tuple)):
        try:
            assert len(col) == 1
            col = col[0]
        except:
            raise ValueError("Can only convert one column at a time")

    # TODO: add a NameValidator function
    # catch recarrays and structured arrays
    if data.dtype.names or data.__class__ is np.recarray:
        if not col and np.squeeze(data).ndim > 1:
            raise IndexError("col is None and the input array is not 1d")
        # Resolve an integer index into a field name.
        if isinstance(col, (int, long)):
            col = data.dtype.names[col]
        if col is None and data.dtype.names and len(data.dtype.names) == 1:
            col = data.dtype.names[0]

        tmp_arr = np.unique(data[col])

        # if the cols are shape (#,) vs (#,1) need to add an axis and flip
        _swap = True
        if data[col].ndim == 1:
            tmp_arr = tmp_arr[:, None]
            _swap = False
        # Indicator matrix: one column per distinct category.
        tmp_dummy = (tmp_arr == data[col]).astype(float)
        if _swap:
            tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)

        # Category values become strings for the new field names.
        if not tmp_arr.dtype.names:  # how do we get to this code path?
            tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
        elif tmp_arr.dtype.names:
            tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]

        # prepend the varname and underscore, if col is numeric attribute
        # lookup is lost for recarrays...
        if col is None:
            try:
                col = data.dtype.names[0]
            except:
                col = 'var'
        # TODO: the above needs to be made robust because there could be many
        # var_yes, var_no varaibles for instance.
        tmp_arr = [col + '_' + item for item in tmp_arr]
        # TODO: test this for rec and structured arrays!!!

        if drop is True:
            if len(data.dtype) <= 1:
                # Only the categorical field existed: return just the dummies,
                # preserving the input's array subclass.
                if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
                    tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
                dt = lzip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))
                # preserve array type
                return np.array(lmap(tuple, tmp_dummy.tolist()),
                                dtype=dt).view(type(data))

            data = nprf.drop_fields(data, col, usemask=False,
                                    asrecarray=type(data) is np.recarray)
        data = nprf.append_fields(data, tmp_arr, data=tmp_dummy,
                                  usemask=False,
                                  asrecarray=type(data) is np.recarray)
        return data

    # handle ndarrays and catch array-like for an error
    elif data.__class__ is np.ndarray or not isinstance(data, np.ndarray):
        if not isinstance(data, np.ndarray):
            raise NotImplementedError("Array-like objects are not supported")

        if isinstance(col, (int, long)):
            offset = data.shape[1]          # need error catching here?
            tmp_arr = np.unique(data[:, col])
            tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
            tmp_dummy = tmp_dummy.swapaxes(1, 0)
            if drop is True:
                offset -= 1
                data = np.delete(data, col, axis=1).astype(float)
            # Dummies are appended on the right of the remaining columns.
            data = np.column_stack((data, tmp_dummy))
            if dictnames is True:
                col_map = _make_dictnames(tmp_arr, offset)
                return data, col_map
            return data
        elif col is None and np.squeeze(data).ndim == 1:
            tmp_arr = np.unique(data)
            tmp_dummy = (tmp_arr[:, None] == data).astype(float)
            tmp_dummy = tmp_dummy.swapaxes(1, 0)
            if drop is True:
                if dictnames is True:
                    col_map = _make_dictnames(tmp_arr)
                    return tmp_dummy, col_map
                return tmp_dummy
            else:
                data = np.column_stack((data, tmp_dummy))
                if dictnames is True:
                    # offset=1 because the original 1d variable stays in col 0
                    col_map = _make_dictnames(tmp_arr, offset=1)
                    return data, col_map
                return data
    else:
        raise IndexError("The index %s is not understood" % col)
# TODO: add an axis argument to this for sysreg
def add_constant(data, prepend=True, has_constant='skip'):
    """
    Add a column of ones to an array.

    Parameters
    ----------
    data : array-like
        Column-ordered design matrix.
    prepend : bool
        If True the constant becomes the first column, otherwise it is
        appended as the last column.
    has_constant : str {'raise', 'add', 'skip'}
        What to do when ``data`` already contains a constant column:
        'skip' returns the data unchanged, 'raise' raises a ValueError,
        and 'add' appends a duplicate constant anyway.

    Returns
    -------
    array, recarray or DataFrame
        The original values plus a column of ones; the return type mirrors
        the input type. For pandas/recarray inputs the new column is named
        'const'.
    """
    # pandas objects and recarrays are delegated to the tsatools helper,
    # which knows how to name the new column.
    if _is_using_pandas(data, None) or _is_recarray(data):
        from statsmodels.tsa.tsatools import add_trend
        return add_trend(data, trend='c', prepend=prepend, has_constant=has_constant)

    # Special case for NumPy
    x = np.asanyarray(data)
    if x.ndim == 1:
        x = x[:, None]
    elif x.ndim > 2:
        raise ValueError('Only implementd 2-dimensional arrays')

    # A constant column has zero spread AND a non-zero value (an all-zero
    # column does not count as a constant).
    const_cols = (np.ptp(x, axis=0) == 0) & np.all(x != 0.0, axis=0)
    if const_cols.any():
        if has_constant == 'skip':
            return x
        if has_constant == 'raise':
            raise ValueError("data already contains a constant")

    pieces = [np.ones(x.shape[0]), x]
    if not prepend:
        pieces.reverse()
    return np.column_stack(pieces)
def isestimable(C, D):
    """ True if (Q, P) contrast `C` is estimable for (N, P) design `D`

    Checks whether the rank of ``vstack([C, D])`` equals the rank of `D`:
    if stacking C on top of D does not raise the rank, every row of C lies
    in the row space of D and the contrast is estimable.

    Parameters
    ----------
    C : (Q, P) array-like
        contrast matrix. If `C` has is 1 dimensional assume shape (1, P)
    D: (N, P) array-like
        design matrix

    Returns
    -------
    tf : bool
        True if the contrast `C` is estimable on design `D`
    """
    C, D = np.asarray(C), np.asarray(D)
    if C.ndim == 1:
        C = C.reshape(1, -1)
    if C.shape[1] != D.shape[1]:
        raise ValueError('Contrast should have %d columns' % D.shape[1])
    stacked = np.vstack([C, D])
    return np_matrix_rank(stacked) == np_matrix_rank(D)
def pinv_extended(X, rcond=1e-15):
    """
    Return the pinv of an array X as well as the singular values
    used in computation.

    Code adapted from numpy.

    Parameters
    ----------
    X : array-like
    rcond : float
        Cutoff ratio: singular values below ``rcond * max(s)`` are
        treated as zero.

    Returns
    -------
    (pinv, singular_values) : tuple of ndarray
        ``singular_values`` are the original (unreciprocated) values.
    """
    X = np.asarray(X)
    X = X.conjugate()
    u, s, vt = np.linalg.svd(X, 0)
    s_orig = np.copy(s)
    m = u.shape[0]
    n = vt.shape[1]
    cutoff = rcond * np.maximum.reduce(s)
    # Invert only the significant singular values; zero out the rest.
    for i in range(min(n, m)):
        if s[i] > cutoff:
            s[i] = 1./s[i]
        else:
            s[i] = 0.
    # BUG FIX: the original used np.core.newaxis, reaching into NumPy's
    # private `core` namespace (no longer accessible in NumPy 2.0);
    # np.newaxis is the public equivalent.
    res = np.dot(np.transpose(vt), np.multiply(s[:, np.newaxis],
                                               np.transpose(u)))
    return res, s_orig
def recipr(X):
    """
    Elementwise reciprocal of X with every entry less than or equal to 0
    mapped to 0. Presumes X should be positive in general.
    """
    clipped = np.maximum(np.asarray(X).astype(np.float64), 0)
    positive = clipped > 0
    # Divide only where the denominator is positive; emit 0 elsewhere.
    return np.where(positive, 1.0 / np.where(positive, clipped, 1.0), 0.0)
def recipr0(X):
    """
    Elementwise reciprocal of X with the convention 1/0 == 0.

    Unlike `recipr`, X is not assumed positive: negative entries keep
    their (negative) reciprocals.
    """
    is_zero = np.equal(np.asarray(X), 0)
    return np.where(is_zero, 0, 1. / X)
def clean0(matrix):
    """
    Erase columns of zeros: can save some time in pseudoinverse.
    """
    squared_colsums = np.add.reduce(matrix ** 2, 0)
    kept_cols = [matrix[:, idx] for idx in np.flatnonzero(squared_colsums)]
    return np.array(np.transpose(kept_cols))
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.

    .. deprecated::
        Emits a FutureWarning; use np.linalg.matrix_rank instead.
    """
    from warnings import warn
    warn("rank is deprecated and will be removed in 0.7."
         " Use np.linalg.matrix_rank instead.", FutureWarning)
    X = np.asarray(X)
    if len(X.shape) == 2:
        # Count singular values above the relative tolerance `cond`.
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(),
                                            cond).astype(np.int32)))
    else:
        # 1-d input: rank is 1 unless every entry is zero.
        # BUG FIX: np.alltrue was removed in NumPy 2.0; np.all is the
        # equivalent public function.
        return int(not np.all(np.equal(X, 0.)))
def fullrank(X, r=None):
    """
    Return a matrix whose column span is the same as X.

    If the rank of X is known it can be specified as r -- no check
    is made to ensure that this really is the rank of X.
    """
    if r is None:
        r = np_matrix_rank(X)

    V, D, U = L.svd(X, full_matrices=0)
    # Left singular vectors ordered by decreasing singular value;
    # keep the first r of them as the spanning columns.
    ranking = np.argsort(D)[::-1]
    kept = [V[:, ranking[i]] for i in range(r)]
    return np.asarray(np.transpose(kept)).astype(np.float64)
def unsqueeze(data, axis, oldshape):
    """
    Reinsert a length-1 dimension at `axis` that a reducing operation
    removed, restoring broadcast compatibility with `oldshape`.

    >>> from numpy import mean
    >>> from numpy.random import standard_normal
    >>> x = standard_normal((3,4,5))
    >>> m = mean(x, axis=1)
    >>> m.shape
    (3, 5)
    >>> m = unsqueeze(m, 1, x.shape)
    >>> m.shape
    (3, 1, 5)
    >>>
    """
    target_shape = list(oldshape)
    target_shape[axis] = 1
    return data.reshape(target_shape)
def chain_dot(*arrs):
    """
    Returns the dot product of the given matrices, multiplied
    right-to-left: chain_dot(A, B, C) computes A . (B . C).

    Parameters
    ----------
    arrs: argument list of ndarray

    Returns
    -------
    Dot product of all arguments.

    Examples
    --------
    >>> import numpy as np
    >>> from statsmodels.tools import chain_dot
    >>> A = np.arange(1,13).reshape(3,4)
    >>> B = np.arange(3,15).reshape(4,3)
    >>> C = np.arange(5,8).reshape(3,1)
    >>> chain_dot(A,B,C)
    array([[1820],
           [4300],
           [6780]])
    """
    # Fold from the right so the association order matches the original
    # reduce(lambda x, y: np.dot(y, x), arrs[::-1]) exactly.
    reversed_arrs = arrs[::-1]
    product = reversed_arrs[0]
    for mat in reversed_arrs[1:]:
        product = np.dot(mat, product)
    return product
def nan_dot(A, B):
    """
    Returns np.dot(A, B) with the convention that nan * 0 = 0 and
    nan * x = nan if x != 0.

    Parameters
    ----------
    A, B : np.ndarrays
    """
    # An output cell must be nan exactly when a nan in A meets a nonzero
    # in B, or a nonzero in A meets a nan in B.
    nan_mask = (np.dot(np.isnan(A), B != 0) |
                np.dot(A != 0, np.isnan(B)))
    # With nans replaced by 0, the plain dot product realizes nan * 0 = 0.
    result = np.dot(np.nan_to_num(A), np.nan_to_num(B))
    result[nan_mask] = np.nan
    return result
def maybe_unwrap_results(results):
    """
    Gets raw results back from wrapped results.

    Can be used in plotting functions or other post-estimation type
    routines.
    """
    try:
        return results._results
    except AttributeError:
        return results
class Bunch(dict):
    """
    Returns a dict-like object with keys accessible via attribute lookup.
    """
    def __init__(self, **kw):
        super(Bunch, self).__init__(kw)
        # Aliasing __dict__ to the dict itself makes item and attribute
        # access interchangeable.
        self.__dict__ = self
# Deprecation shim: redirect the old statsmodels.tools.tools.webuse entry
# point to statsmodels.datasets.webuse, warning on use.
# NOTE(review): np.deprecate was removed in NumPy 2.0 -- this line will need
# replacing (e.g. with a warnings-based wrapper) when NumPy >= 2 is required.
webuse = np.deprecate(webuse,
                      old_name='statsmodels.tools.tools.webuse',
                      new_name='statsmodels.datasets.webuse',
                      message='webuse will be removed from the tools '
                              'namespace in the 0.7.0 release. Please use the'
                              ' new import.')
| bsd-3-clause |
ronggong/jingjuSingingPhraseMatching | general/dtwSankalp.py | 1 | 1657 | import sys,os
import matplotlib.pyplot as plt
# the project folder: fileDir
fileDir = os.path.dirname(os.path.realpath('__file__'))
dtwPath = os.path.join(fileDir, '../../Library_PythonNew/similarityMeasures/dtw/')
# transcribe the path string to full path
dtwPath = os.path.abspath(os.path.realpath(dtwPath))
sys.path.append(dtwPath)
import dtw
import numpy as np
def dtw1d_generic(x, y):
    """DTW between two 1-d sequences using the generic (GLS) routine.

    Returns (accumulated distance, warping path); uses squared-euclidean
    local distance and no global path constraint.
    """
    configuration = {
        'distType': 1,  # square euclidean
        'hasGlobalConst': 0,
        'globalType': 0,
        'bandwidth': 0.2,
        'initCostMtx': 1,
        'reuseCostMtx': 0,
        # one is horizontal, another is vertical, but I don't know which
        'delStep': 1,
        'moveStep': 2,
        'diagStep': 1,
        'initFirstCol': 1,
        'isSubsequence': 0,
    }
    udist, _, path, _ = dtw.dtw1d_GLS(x, y, configuration)
    return udist, path
def dtw1d_std(x, y):
    """Standard 1-d DTW; returns (accumulated distance, warping path)."""
    configuration = {
        'Output': 3,
        'Ldistance': {'type': 0},
    }
    udist, _, path = dtw.dtw1d(x, y, configuration)
    return udist, path
def dtwNd(x, y):
    """Multidimensional DTW with uniform feature weights.

    Returns (accumulated distance, path length).
    """
    n_dims = x.shape[1]
    configuration = {
        'Output': 2,
        'Ldistance': {
            'type': 0,
            # equal weight for every feature dimension
            'weight': np.ones((n_dims,)) / float(n_dims),
        },
    }
    udist, plen = dtw.dtwNd(x, y, configuration)
    return udist, plen
def plotDTW(path, cost_arr):
    """Display the accumulated cost matrix as a mesh plot.

    NOTE(review): the `path` argument is currently unused.
    """
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.pcolormesh(cost_arr)
    plt.show()
cython-testbed/pandas | pandas/tests/indexing/test_categorical.py | 5 | 26870 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas.compat as compat
import numpy as np
from pandas import (Series, DataFrame, Timestamp, Categorical,
CategoricalIndex, Interval, Index)
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas.util import testing as tm
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalIndex(object):
    def setup_method(self, method):
        # Fixtures, all indexed by the categorical column 'B':
        #   df  -- letters with categories ('c', 'a', 'b'), unordered
        #   df2 -- same data, plus a never-used category 'e'
        #   df3 -- ints with ordered categories [3, 2, 1]
        #   df4 -- ints with unordered categories [3, 2, 1]
        self.df = DataFrame({'A': np.arange(6, dtype='int64'),
                             'B': Series(list('aabbca')).astype(
                                 CDT(list('cab')))}).set_index('B')
        self.df2 = DataFrame({'A': np.arange(6, dtype='int64'),
                              'B': Series(list('aabbca')).astype(
                                  CDT(list('cabe')))}).set_index('B')
        self.df3 = DataFrame({'A': np.arange(6, dtype='int64'),
                              'B': (Series([1, 1, 2, 1, 3, 2])
                                    .astype(CDT([3, 2, 1], ordered=True)))
                              }).set_index('B')
        self.df4 = DataFrame({'A': np.arange(6, dtype='int64'),
                              'B': (Series([1, 1, 2, 1, 3, 2])
                                    .astype(CDT([3, 2, 1], ordered=False)))
                              }).set_index('B')
    def test_loc_scalar(self):
        # .loc with an existing category selects every matching row.
        result = self.df.loc['a']
        expected = (DataFrame({'A': [0, 1, 5],
                               'B': (Series(list('aaa'))
                                     .astype(CDT(list('cab'))))})
                    .set_index('B'))
        assert_frame_equal(result, expected)

        # Assigning through .loc with an existing category broadcasts the
        # value to all matching rows.
        df = self.df.copy()
        df.loc['a'] = 20
        expected = (DataFrame({'A': [20, 20, 2, 3, 4, 20],
                               'B': (Series(list('aabbca'))
                                     .astype(CDT(list('cab'))))})
                    .set_index('B'))
        assert_frame_equal(df, expected)

        # value not in the categories
        pytest.raises(KeyError, lambda: df.loc['d'])

        # Setting with a label outside the categories raises TypeError,
        # for whole rows and for individual cells alike.
        def f():
            df.loc['d'] = 10
        pytest.raises(TypeError, f)

        def f():
            df.loc['d', 'A'] = 10
        pytest.raises(TypeError, f)

        def f():
            df.loc['d', 'C'] = 10
        pytest.raises(TypeError, f)
def test_getitem_scalar(self):
cats = Categorical([Timestamp('12-31-1999'),
Timestamp('12-31-2000')])
s = Series([1, 2], index=cats)
expected = s.iloc[0]
result = s[cats[0]]
assert result == expected
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
assert sliced == "d"
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
tm.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
    def test_slicing(self):
        # Reversing a categorical Series keeps the underlying values.
        cat = Series(Categorical([1, 2, 3, 4]))
        reversed = cat[::-1]
        exp = np.array([4, 3, 2, 1], dtype=np.int64)
        tm.assert_numpy_array_equal(reversed.__array__(), exp)
        # Positional row access on a frame with an Interval-categorical
        # column ('D', produced by pd.cut) round-trips the Interval value.
        df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
        df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
        expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10)
        result = df.iloc[10]
        tm.assert_series_equal(result, expected)
        # iloc slicing preserves the categorical dtype of 'D'
        expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
                             index=np.arange(10, 20).astype('int64'))
        expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
        result = df.iloc[10:20]
        tm.assert_frame_equal(result, expected)
        # label-based row access behaves the same as positional here
        expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8)
        result = df.loc[8]
        tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.iloc[2, 0]
assert res_val == exp_val
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", "cats"]
assert res_val == exp_val
# ix
# frame
# res_df = df.loc["j":"k",[0,1]] # doesn't work?
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", df.columns[0]]
assert res_val == exp_val
# iat
res_val = df.iat[2, 0]
assert res_val == exp_val
# at
res_val = df.at["j", "cats"]
assert res_val == exp_val
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.at["j", "cats"]
assert res_val == exp_val
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
assert is_categorical_dtype(res_df["cats"])
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", df.columns[0:1]]
expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c'])},
index=['h', 'i', 'j'])
tm.assert_frame_equal(result, expected)
def test_getitem_category_type(self):
# GH 14580
# test iloc() on Series with Categorical data
s = Series([1, 2, 3]).astype('category')
# get slice
result = s.iloc[0:2]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get list of indexes
result = s.iloc[[0, 1]]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get boolean array
result = s.iloc[[True, False, False]]
expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
def test_loc_listlike(self):
# list of labels
result = self.df.loc[['c', 'a']]
expected = self.df.iloc[[4, 0, 1, 5]]
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=True)
# element in the categories but not in the values
pytest.raises(KeyError, lambda: self.df2.loc['e'])
# assign is ok
df = self.df2.copy()
df.loc['e'] = 20
result = df.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, 20]}, index=exp_index)
assert_frame_equal(result, expected)
df = self.df2.copy()
result = df.loc[['a', 'b', 'e']]
exp_index = CategoricalIndex(
list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=True)
# not all labels in the categories
with pytest.raises(KeyError):
self.df2.loc[['a', 'd']]
def test_loc_listlike_dtypes(self):
# GH 11586
# unique categories and codes
index = CategoricalIndex(['a', 'b', 'c'])
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)
# unique slice
res = df.loc[['a', 'b']]
exp_index = CategoricalIndex(['a', 'b'],
categories=index.categories)
exp = DataFrame({'A': [1, 2], 'B': [4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp_index = CategoricalIndex(['a', 'a', 'b'],
categories=index.categories)
exp = DataFrame({'A': [1, 1, 2], 'B': [4, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values that are '
'in the categories'):
df.loc[['a', 'x']]
# duplicated categories and codes
index = CategoricalIndex(['a', 'b', 'a'])
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)
# unique slice
res = df.loc[['a', 'b']]
exp = DataFrame({'A': [1, 3, 2],
'B': [4, 6, 5]},
index=CategoricalIndex(['a', 'a', 'b']))
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp = DataFrame(
{'A': [1, 3, 1, 3, 2],
'B': [4, 6, 4, 6, 5
]}, index=CategoricalIndex(['a', 'a', 'a', 'a', 'b']))
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values '
'that are in the categories'):
df.loc[['a', 'x']]
# contains unused category
index = CategoricalIndex(
['a', 'b', 'a', 'c'], categories=list('abcde'))
df = DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=index)
res = df.loc[['a', 'b']]
exp = DataFrame({'A': [1, 3, 2], 'B': [5, 7, 6]},
index=CategoricalIndex(['a', 'a', 'b'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
res = df.loc[['a', 'e']]
exp = DataFrame({'A': [1, 3, np.nan], 'B': [5, 7, np.nan]},
index=CategoricalIndex(['a', 'a', 'e'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
# duplicated slice
res = df.loc[['a', 'a', 'b']]
exp = DataFrame({'A': [1, 3, 1, 3, 2], 'B': [5, 7, 5, 7, 6]},
index=CategoricalIndex(['a', 'a', 'a', 'a', 'b'],
categories=list('abcde')))
tm.assert_frame_equal(res, exp, check_index_type=True)
with tm.assert_raises_regex(
KeyError,
'a list-indexer must only include values '
'that are in the categories'):
df.loc[['a', 'x']]
def test_get_indexer_array(self):
arr = np.array([Timestamp('1999-12-31 00:00:00'),
Timestamp('2000-12-31 00:00:00')], dtype=object)
cats = [Timestamp('1999-12-31 00:00:00'),
Timestamp('2000-12-31 00:00:00')]
ci = CategoricalIndex(cats,
categories=cats,
ordered=False, dtype='category')
result = ci.get_indexer(arr)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_same_categories_same_order(self):
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'])
result = ci.get_indexer(CategoricalIndex(['b', 'b'],
categories=['a', 'b']))
expected = np.array([1, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19551
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'])
result = ci.get_indexer(CategoricalIndex(['b', 'b'],
categories=['b', 'a']))
expected = np.array([1, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
def test_getitem_with_listlike(self):
# GH 16115
cats = Categorical([Timestamp('12-31-1999'),
Timestamp('12-31-2000')])
expected = DataFrame([[1, 0], [0, 1]], dtype='uint8',
index=[0, 1], columns=cats)
dummies = pd.get_dummies(cats)
result = dummies[[c for c in dummies.columns]]
assert_frame_equal(result, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
tm.assert_numpy_array_equal(result, np.array([5], dtype='int8'))
def test_ix_categorical_index(self):
# GH 12531
df = DataFrame(np.random.randn(3, 3),
index=list('ABC'), columns=list('XYZ'))
cdf = df.copy()
cdf.index = CategoricalIndex(df.index)
cdf.columns = CategoricalIndex(df.columns)
expect = Series(df.loc['A', :], index=cdf.columns, name='A')
assert_series_equal(cdf.loc['A', :], expect)
expect = Series(df.loc[:, 'X'], index=cdf.index, name='X')
assert_series_equal(cdf.loc[:, 'X'], expect)
exp_index = CategoricalIndex(list('AB'), categories=['A', 'B', 'C'])
expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
index=exp_index)
assert_frame_equal(cdf.loc[['A', 'B'], :], expect)
exp_columns = CategoricalIndex(list('XY'),
categories=['X', 'Y', 'Z'])
expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
columns=exp_columns)
assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
# non-unique
df = DataFrame(np.random.randn(3, 3),
index=list('ABA'), columns=list('XYX'))
cdf = df.copy()
cdf.index = CategoricalIndex(df.index)
cdf.columns = CategoricalIndex(df.columns)
exp_index = CategoricalIndex(list('AA'), categories=['A', 'B'])
expect = DataFrame(df.loc['A', :], columns=cdf.columns,
index=exp_index)
assert_frame_equal(cdf.loc['A', :], expect)
exp_columns = CategoricalIndex(list('XX'), categories=['X', 'Y'])
expect = DataFrame(df.loc[:, 'X'], index=cdf.index,
columns=exp_columns)
assert_frame_equal(cdf.loc[:, 'X'], expect)
expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
index=CategoricalIndex(list('AAB')))
assert_frame_equal(cdf.loc[['A', 'B'], :], expect)
expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
columns=CategoricalIndex(list('XXY')))
assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
def test_read_only_source(self):
# GH 10043
rw_array = np.eye(10)
rw_df = DataFrame(rw_array)
ro_array = np.eye(10)
ro_array.setflags(write=False)
ro_df = DataFrame(ro_array)
assert_frame_equal(rw_df.iloc[[1, 2, 3]], ro_df.iloc[[1, 2, 3]])
assert_frame_equal(rw_df.iloc[[1]], ro_df.iloc[[1]])
assert_series_equal(rw_df.iloc[1], ro_df.iloc[1])
assert_frame_equal(rw_df.iloc[1:3], ro_df.iloc[1:3])
assert_frame_equal(rw_df.loc[[1, 2, 3]], ro_df.loc[[1, 2, 3]])
assert_frame_equal(rw_df.loc[[1]], ro_df.loc[[1]])
assert_series_equal(rw_df.loc[1], ro_df.loc[1])
assert_frame_equal(rw_df.loc[1:3], ro_df.loc[1:3])
def test_reindexing(self):
# reindexing
# convert to a regular index
result = self.df2.reindex(['a', 'b', 'e'])
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
'B': Series(list('aaabbe'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b'])
expected = DataFrame({'A': [0, 1, 5, 2, 3],
'B': Series(list('aaabb'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['e'])
expected = DataFrame({'A': [np.nan],
'B': Series(['e'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['d'])
expected = DataFrame({'A': [np.nan],
'B': Series(['d'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# since we are actually reindexing with a Categorical
# then return a Categorical
cats = list('cabe')
result = self.df2.reindex(Categorical(['a', 'd'], categories=cats))
expected = DataFrame({'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
CDT(cats))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(Categorical(['a'], categories=cats))
expected = DataFrame({'A': [0, 1, 5],
'B': Series(list('aaa')).astype(
CDT(cats))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b', 'e'])
expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
'B': Series(list('aaabbe'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['a', 'b'])
expected = DataFrame({'A': [0, 1, 5, 2, 3],
'B': Series(list('aaabb'))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(['e'])
expected = DataFrame({'A': [np.nan],
'B': Series(['e'])}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# give back the type of categorical that we received
result = self.df2.reindex(Categorical(
['a', 'd'], categories=cats, ordered=True))
expected = DataFrame(
{'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
CDT(cats, ordered=True))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
result = self.df2.reindex(Categorical(
['a', 'd'], categories=['a', 'd']))
expected = DataFrame({'A': [0, 1, 5, np.nan],
'B': Series(list('aaad')).astype(
CDT(['a', 'd']))}).set_index('B')
assert_frame_equal(result, expected, check_index_type=True)
# passed duplicate indexers are not allowed
pytest.raises(ValueError, lambda: self.df2.reindex(['a', 'a']))
# args NotImplemented ATM
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], method='ffill'))
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], level=1))
pytest.raises(NotImplementedError,
lambda: self.df2.reindex(['a'], limit=2))
def test_loc_slice(self):
# slicing
# not implemented ATM
# GH9748
pytest.raises(TypeError, lambda: self.df.loc[1:5])
# result = df.loc[1:5]
# expected = df.iloc[[1,2,3,4]]
# assert_frame_equal(result, expected)
def test_boolean_selection(self):
df3 = self.df3
df4 = self.df4
result = df3[df3.index == 'a']
expected = df3.iloc[[]]
assert_frame_equal(result, expected)
result = df4[df4.index == 'a']
expected = df4.iloc[[]]
assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name=u'B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name=u'B')
pytest.raises(TypeError, lambda: df4[df4.index < 2])
pytest.raises(TypeError, lambda: df4[df4.index > 1])
def test_indexing_with_category(self):
# https://github.com/pandas-dev/pandas/issues/12564
# consistent result if comparing as Dataframe
cat = DataFrame({'A': ['foo', 'bar', 'baz']})
exp = DataFrame({'A': [True, False, False]})
res = (cat[['A']] == 'foo')
tm.assert_frame_equal(res, exp)
cat['A'] = cat['A'].astype('category')
res = (cat[['A']] == 'foo')
tm.assert_frame_equal(res, exp)
def test_map_with_dict_or_series(self):
orig_values = ['a', 'B', 1, 'a']
new_values = ['one', 2, 3.0, 'one']
cur_index = pd.CategoricalIndex(orig_values, name='XXX')
expected = pd.CategoricalIndex(new_values,
name='XXX', categories=[3.0, 2, 'one'])
mapper = pd.Series(new_values[:-1], index=orig_values[:-1])
output = cur_index.map(mapper)
# Order of categories in output can be different
tm.assert_index_equal(expected, output)
mapper = {o: n for o, n in
zip(orig_values[:-1], new_values[:-1])}
output = cur_index.map(mapper)
# Order of categories in output can be different
tm.assert_index_equal(expected, output)
| bsd-3-clause |
elenita1221/BDA_py_demos | demos_pystan/pystan_demo.py | 19 | 12220 | """Bayesian Data Analysis, 3rd ed
PyStan demo
Demo for using Stan with Python interface PyStan.
"""
import numpy as np
import pystan
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
# 'axes.color_cycle' was deprecated in matplotlib 1.5 and later removed;
# the supported spelling is a prop_cycle built from a color cycler.
plt.rc('axes', prop_cycle=plt.cycler(color=['#377eb8', '#e41a1c', '#4daf4a',
                                            '#984ea3', '#ff7f00', '#ffff33']))
# ====== Bernoulli model =======================================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
for (n in 1:N)
y[n] ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[0,1,0,0,1,1,1,0,1,0])
fit = pystan.stan(model_code=bernoulli_code, data=data)
print(fit)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Vectorized Bernoulli model ============================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[1,1,1,0,1,1,1,0,1,1])
fit = pystan.stan(model_code=bernoulli_code, data=data)
# ====== Binomial model ========================================================
binomial_code = """
data {
int<lower=0> N;
int<lower=0> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ binomial(N,theta);
}
"""
data = dict(N=10, y=8)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Re-running Binomial model with new data ===============================
data = dict(N=10, y=10)
fit = pystan.stan(fit=fit, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Comparison of two groups with Binomial ================================
binomial_code = """
data {
int<lower=0> N1;
int<lower=0> y1;
int<lower=0> N2;
int<lower=0> y2;
}
parameters {
real<lower=0,upper=1> theta1;
real<lower=0,upper=1> theta2;
}
transformed parameters {
real oddsratio;
oddsratio <- (theta2/(1-theta2))/(theta1/(1-theta1));
}
model {
theta1 ~ beta(1,1);
theta2 ~ beta(1,1);
y1 ~ binomial(N1,theta1);
y2 ~ binomial(N2,theta2);
}
"""
data = dict(N1=674, y1=39, N2=680, y2=22)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['oddsratio'], 50)
plt.show()
# ====== Gaussian linear model =================================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N=N, x=x, y=y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print 'Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with adjustable priors ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
real pmualpha; // prior mean for alpha
real psalpha; // prior std for alpha
real pmubeta; // prior mean for beta
real psbeta; // prior std for beta
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
alpha ~ normal(pmualpha,psalpha);
beta ~ normal(pmubeta,psbeta);
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
x = x,
y = y,
pmualpha = y.mean(), # Centered
psalpha = (14-4)/6.0, # avg temp between 4-14
pmubeta = 0, # a priori increase and decrese as likely
psbeta = (.1--.1)/6.0 # avg temp probably does not increase more than 1
# degree per 10 years
)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print 'Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with standardized data ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
transformed data {
vector[N] x_std;
vector[N] y_std;
x_std <- (x - mean(x)) / sd(x);
y_std <- (y - mean(y)) / sd(y);
}
parameters {
real alpha;
real beta;
real<lower=0> sigma_std;
}
transformed parameters {
vector[N] mu_std;
mu_std <- alpha + beta*x_std;
}
model {
alpha ~ normal(0,1);
beta ~ normal(0,1);
y_std ~ normal(mu_std, sigma_std);
}
generated quantities {
vector[N] mu;
real<lower=0> sigma;
mu <- mean(y) + mu_std*sd(y);
sigma <- sigma_std*sd(y);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print 'Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear student-t model =======================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
real<lower=1,upper=80> nu;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
nu ~ gamma(2,0.1); // Juarez and Steel (2010)
y ~ student_t(nu, mu, sigma);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,12))
plt.subplot(4,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(4,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print 'Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0))
plt.subplot(4,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.subplot(4,1,4)
plt.hist(samples['nu'], 50)
plt.xlabel('nu')
plt.tight_layout()
plt.show()
# ====== Comparison of k groups (ANOVA) ========================================
group_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there difference between different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=group_code, data=data)
# Analyse results
mu = fit.extract(permuted=True)['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
# pairwise posterior comparison of the group means
# (print() calls: the Python 2 print statement is a syntax error on Python 3)
print("Matrix of probabilities that one mu is larger than other:")
print(ps)
# Plot
plt.boxplot(mu)
plt.show()
# ====== Hierarchical prior model for comparison of k groups (ANOVA) ===========
# results do not differ much from the previous, because there is only
# few groups and quite much data per group, but this works as an example anyway
hier_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
real mu0; // prior mean
real<lower=0> sigma0; // prior std
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
mu0 ~ normal(10,10); // weakly informative prior
sigma0 ~ cauchy(0,4); // weakly informative prior
mu ~ normal(mu0, sigma0); // population prior with unknown parameters
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there difference between different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=hier_code, data=data)
# Analyse results
samples = fit.extract(permuted=True)
print "std(mu0): {}".format(np.std(samples['mu0']))
mu = samples['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
# pairwise posterior comparison of the group means
# (print() calls: the Python 2 print statement is a syntax error on Python 3)
print("Matrix of probabilities that one mu is larger than other:")
print(ps)
# Plot
plt.boxplot(mu)
plt.show()
| gpl-3.0 |
hrjn/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
# Surface floating-point issues during fitting as warnings, not silence.
np.seterr(all='warn')
# Shared fixture: digits data rescaled into [0, 1] -- BernoulliRBM models
# binary / unit-interval visible units.
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
    # Fitting on a copy of the digits data should reach the expected
    # pseudo-likelihood and must leave the input unmodified.
    X = Xdigits.copy()

    model = BernoulliRBM(n_components=64, learning_rate=0.1,
                         batch_size=10, n_iter=7, random_state=9)
    model.fit(X)

    assert_almost_equal(model.score_samples(X).mean(), -21., decimal=0)
    # in-place tricks shouldn't have modified X
    assert_array_equal(X, Xdigits)
def test_partial_fit():
    # partial_fit over mini-batches for 7 passes should reach the same
    # score as a single fit() call, and must not modify the input.
    X = Xdigits.copy()
    rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
                       batch_size=20, random_state=9)
    n_samples = X.shape[0]
    n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
    batch_slices = np.array_split(X, n_batches)
    for i in range(7):
        for batch in batch_slices:
            rbm.partial_fit(batch)
    assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
    assert_array_equal(X, Xdigits)
def test_transform():
    # transform() is documented to return the mean hidden-unit
    # activations, i.e. exactly what _mean_hiddens computes.
    X = Xdigits[:100]
    model = BernoulliRBM(n_components=16, batch_size=5,
                         n_iter=5, random_state=42)
    model.fit(X)
    assert_array_equal(model.transform(X), model._mean_hiddens(X))
def test_small_sparse():
    # BernoulliRBM should work on small sparse matrices.
    # Only checks that fitting a CSR input completes without raising.
    X = csr_matrix(Xdigits[:4])
    BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
    """partial_fit yields the same scores for sparse and dense input."""
    for sparse in [csc_matrix, csr_matrix]:
        X_sparse = sparse(Xdigits[:100])
        X = Xdigits[:100].copy()
        # Two identically-seeded models: one fed sparse, one fed dense data.
        rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
                            batch_size=10, random_state=9)
        rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
                            batch_size=10, random_state=9)
        rbm1.partial_fit(X_sparse)
        rbm2.partial_fit(X)
        # The input representation must not affect the learned model.
        assert_almost_equal(rbm1.score_samples(X).mean(),
                            rbm2.score_samples(X).mean(),
                            decimal=0)
def test_sample_hiddens():
    """Averaging many hidden samples converges to the hidden-unit means."""
    rng = np.random.RandomState(0)
    data = Xdigits[:100]
    model = BernoulliRBM(n_components=2, batch_size=5,
                         n_iter=5, random_state=42)
    model.fit(data)
    expected = model._mean_hiddens(data[0])
    draws = [model._sample_hiddens(data[0], rng) for _ in range(100)]
    assert_almost_equal(expected, np.mean(draws, 0), decimal=1)
def test_fit_gibbs():
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
    # from the same input
    rng = np.random.RandomState(42)
    X = np.array([[0.], [1.]])
    rbm1 = BernoulliRBM(n_components=2, batch_size=2,
                        n_iter=42, random_state=rng)
    # you need that much iters
    rbm1.fit(X)
    # Regression check: the fitted weights are pinned to known values.
    assert_almost_equal(rbm1.components_,
                        np.array([[0.02649814], [0.02009084]]), decimal=4)
    # One Gibbs step should reproduce the trivially separable input.
    assert_almost_equal(rbm1.gibbs(X), X)
    # The fitted model is returned so test_fit_gibbs_sparse can reuse it
    # for a dense-vs-sparse comparison.
    return rbm1
def test_fit_gibbs_sparse():
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
    # the same input even when the input is sparse, and test against non-sparse
    rbm1 = test_fit_gibbs()  # dense-trained reference model
    rng = np.random.RandomState(42)
    from scipy.sparse import csc_matrix
    X = csc_matrix([[0.], [1.]])
    rbm2 = BernoulliRBM(n_components=2, batch_size=2,
                        n_iter=42, random_state=rng)
    rbm2.fit(X)
    assert_almost_equal(rbm2.components_,
                        np.array([[0.02649814], [0.02009084]]), decimal=4)
    assert_almost_equal(rbm2.gibbs(X), X.toarray())
    # Sparse and dense training must converge to identical weights.
    assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    """Gibbs sampling on the full digits data is finite and stochastic."""
    model = BernoulliRBM(n_components=42, batch_size=40,
                         n_iter=20, random_state=42)
    model.fit(Xdigits)
    first_draw = model.gibbs(Xdigits)
    assert_all_finite(first_draw)
    second_draw = model.gibbs(Xdigits)
    # consecutive draws should differ in at least one unit per sample
    assert_true(np.all((first_draw != second_draw).max(axis=1)))
def test_score_samples():
    # Test score_samples (pseudo-likelihood) method.
    # Assert that pseudo-likelihood is computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    X = np.vstack([np.zeros(1000), np.ones(1000)])
    rbm1 = BernoulliRBM(n_components=10, batch_size=2,
                        n_iter=10, random_state=rng)
    rbm1.fit(X)
    assert_true((rbm1.score_samples(X) < -300).all())
    # Sparse vs. dense should not affect the output. Also test sparse input
    # validation.
    # Reseed before each call so both scorings draw identical random bits.
    rbm1.random_state = 42
    d_score = rbm1.score_samples(X)
    rbm1.random_state = 42
    s_score = rbm1.score_samples(lil_matrix(X))
    assert_almost_equal(d_score, s_score)
    # Test numerical stability (#2785): would previously generate infinities
    # and crash with an exception.
    with np.errstate(under='ignore'):
        rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
    """Fitting with verbose output enabled must not raise."""
    model = BernoulliRBM(n_iter=2, verbose=10)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()  # swallow the progress messages
    try:
        model.fit(Xdigits)
    finally:
        # always restore stdout, even if fit() raises
        sys.stdout = saved_stdout
def test_sparse_and_verbose():
    # Make sure RBM works with sparse input when verbose=True
    old_stdout = sys.stdout
    sys.stdout = StringIO()  # capture the verbose progress output
    from scipy.sparse import csc_matrix
    X = csc_matrix([[0.], [1.]])
    rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
                       random_state=42, verbose=True)
    try:
        rbm.fit(X)
        s = sys.stdout.getvalue()
        # make sure output is sound
        assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
                             r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
                             r" time = (\d|\.)+s",
                             s))
    finally:
        # restore stdout even if the assertion fails
        sys.stdout = old_stdout
| bsd-3-clause |
Obus/scikit-learn | sklearn/kernel_ridge.py | 44 | 6504 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
    """Kernel ridge regression.

    Kernel ridge regression (KRR) combines ridge regression (linear least
    squares with l2-norm regularization) with the kernel trick. It thus
    learns a linear function in the space induced by the respective kernel and
    the data. For non-linear kernels, this corresponds to a non-linear
    function in the original space.

    The form of the model learned by KRR is identical to support vector
    regression (SVR). However, different loss functions are used: KRR uses
    squared error loss while support vector regression uses epsilon-insensitive
    loss, both combined with l2 regularization. In contrast to SVR, fitting a
    KRR model can be done in closed-form and is typically faster for
    medium-sized datasets. On the other hand, the learned model is non-sparse
    and thus slower than SVR, which learns a sparse model for epsilon > 0, at
    prediction-time.

    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).

    Read more in the :ref:`User Guide <kernel_ridge>`.

    Parameters
    ----------
    alpha : {float, array-like}, shape = [n_targets]
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``(2*C)^-1`` in other linear models such as LogisticRegression or
        LinearSVC. If an array is passed, penalties are assumed to be specific
        to the targets. Hence they must correspond in number.

    kernel : string or callable, default="linear"
        Kernel mapping used internally. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.

    gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential chi2 and
        sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    Attributes
    ----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
        Weight vector(s) in kernel space; one dual coefficient per training
        sample (as returned by the Cholesky kernel solver).

    X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training data, which is also required for prediction

    References
    ----------
    * Kevin P. Murphy
      "Machine Learning: A Probabilistic Perspective", The MIT Press
      chapter 14.4.3, pp. 492-493

    See also
    --------
    Ridge
        Linear ridge regression.
    SVR
        Support Vector Regression implemented using libsvm.

    Examples
    --------
    >>> from sklearn.kernel_ridge import KernelRidge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> clf = KernelRidge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
                kernel_params=None)
    """
    def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
                 kernel_params=None):
        self.alpha = alpha
        self.kernel = kernel
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params

    def _get_kernel(self, X, Y=None):
        # Compute the kernel (Gram) matrix between X and Y (or X and itself).
        if callable(self.kernel):
            # For callable kernels the extra parameters come from kernel_params.
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        # filter_params=True drops parameters not understood by the kernel.
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)

    @property
    def _pairwise(self):
        # Tells sklearn utilities that X is a kernel matrix, not raw features.
        return self.kernel == "precomputed"

    def fit(self, X, y=None, sample_weight=None):
        """Fit Kernel Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample, ignored if None is passed.

        Returns
        -------
        self : returns an instance of self.
        """
        # Convert data
        X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True)

        K = self._get_kernel(X)
        alpha = np.atleast_1d(self.alpha)

        # The solver works on 2-d targets; remember whether to flatten back.
        ravel = False
        if len(y.shape) == 1:
            y = y.reshape(-1, 1)
            ravel = True

        # Copy the Gram matrix only when it is user-supplied (precomputed),
        # because the solver may overwrite it in place.
        copy = self.kernel == "precomputed"
        self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
                                                 sample_weight,
                                                 copy)
        if ravel:
            self.dual_coef_ = self.dual_coef_.ravel()

        # Training data is retained: prediction needs kernels against it.
        self.X_fit_ = X

        return self

    def predict(self, X):
        """Predict using the kernel ridge model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Samples.

        Returns
        -------
        C : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        check_is_fitted(self, ["X_fit_", "dual_coef_"])
        # Prediction = K(X, X_fit_) @ dual_coef_.
        K = self._get_kernel(X, self.X_fit_)
        return np.dot(K, self.dual_coef_)
| bsd-3-clause |
jakirkham/bokeh | examples/models/file/latex_extension.py | 3 | 2596 | """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
from bokeh.models import Label
from bokeh.palettes import Spectral4
from bokeh.plotting import output_file, figure, show
import numpy as np
from scipy.special import jv
output_file('external_resources.html')
class LatexLabel(Label):
    """A subclass of `Label` with all of the same class attributes except
    canvas mode isn't supported and DOM manipulation happens in the coffeescript
    superclass implementation that requires setting `render_mode='css'`).
    Only the render method of LabelView is overwritten to perform the
    text -> latex (via katex) conversion
    """
    # Extra browser resources injected into the generated document so KaTeX
    # is available when the custom view renders.
    __javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.9.0/katex.min.js"]
    __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.9.0/katex.min.css"]
    # CoffeeScript view implementation compiled by bokeh: it computes the
    # screen position like the stock LabelView, then hands the label text to
    # katex.render instead of drawing it on the canvas.
    __implementation__ = """
import {Label, LabelView} from "models/annotations/label"
export class LatexLabelView extends LabelView
render: () ->
# Here because AngleSpec does units tranform and label doesn't support specs
switch @model.angle_units
when "rad" then angle = -1 * @model.angle
when "deg" then angle = -1 * @model.angle * Math.PI/180.0
panel = @model.panel ? @plot_view.frame
xscale = @plot_view.frame.xscales[@model.x_range_name]
yscale = @plot_view.frame.yscales[@model.y_range_name]
sx = if @model.x_units == "data" then xscale.compute(@model.x) else panel.xview.compute(@model.x)
sy = if @model.y_units == "data" then yscale.compute(@model.y) else panel.yview.compute(@model.y)
sx += @model.x_offset
sy -= @model.y_offset
@_css_text(@plot_view.canvas_view.ctx, "", sx, sy, angle)
katex.render(@model.text, @el, {displayMode: true})
export class LatexLabel extends Label
type: 'LatexLabel'
default_view: LatexLabelView
"""
# Figure used to showcase the custom KaTeX-rendered annotation.
p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350,
           background_fill_color="#fafafa")
p.x_range.range_padding = 0
# Bessel functions of the first kind J_nu for several orders nu.
x = np.arange(0.0, 20.0, 0.02)
for i, n in enumerate([0, 1, 4, 7]):
    p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend="𝜈=%d" % n)
# KaTeX source for the series definition of J_nu (rendered by LatexLabel).
text = (r"\text{Bessel Functions of the First Kind: }" +
        r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" +
        r"\left(\frac{x}{2}\right)^{2m+\nu}")
latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',
                   render_mode='css', text_font_size='8pt',
                   background_fill_color="white", border_line_color="lightgrey")
p.add_layout(latex)
show(p)
| bsd-3-clause |
jermainewang/mxnet | example/kaggle-ndsb1/training_curves.py | 52 | 1879 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val vs epoch plot
import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
# Command-line interface: the only argument is the path to the training log.
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str, default="log_tr_va",
                    help='the path of log file')
args = parser.parse_args()

# Raw strings so \s and \d are genuine regex escapes; the original non-raw
# literals were invalid escape sequences (DeprecationWarning on Python 3.6+).
TR_RE = re.compile(r'.*?]\sTrain-accuracy=([\d\.]+)')
VA_RE = re.compile(r'.*?]\sValidation-accuracy=([\d\.]+)')

# Read the log through a context manager so the handle is always closed
# (the original `open(...).read()` leaked the file object).
with open(args.log_file) as log_file:
    log = log_file.read()
log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))

# Plot train and validation accuracy against the epoch index.
plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
         label="Train accuracy")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
         label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx) + 1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0, 1])
plt.show()
| apache-2.0 |
rinman24/ucsd_ch | coimbra_chamber/tests/access/experiment/exp_acc_integration_test.py | 1 | 24445 | """Integration test suite for ChamberAccess."""
import dataclasses
import datetime
from decimal import Decimal
from unittest.mock import MagicMock
import dacite
import pytest
from nptdms import TdmsFile
from pandas import DataFrame
from pytz import utc
from sqlalchemy import and_
from coimbra_chamber.access.experiment.contracts import TemperatureSpec
from coimbra_chamber.access.experiment.models import (
Experiment,
Fit,
Observation,
Tube,
Setting,
Temperature)
from coimbra_chamber.access.experiment.service import ExperimentAccess
from coimbra_chamber.tests.conftest import tdms_path
# ----------------------------------------------------------------------------
# ChamberAccess
# _add_tube ------------------------------------------------------------------
def test_add_tube_that_does_not_exist(exp_acc, tube_spec):  # noqa: D103
    # Act --------------------------------------------------------------------
    tube_id = exp_acc._add_tube(tube_spec)
    # Assert -----------------------------------------------------------------
    # First insert into an empty table receives primary key 1.
    assert tube_id == 1
    # Now query result -------------------------------------------------------
    session = exp_acc.Session()
    try:
        query = session.query(Tube).filter(Tube.material == 'test_material')
        result = query.one()
        session.commit()
        # Decimal columns round-trip with the precision declared on the model.
        assert result.inner_diameter == Decimal('0.1000')
        assert result.outer_diameter == Decimal('0.2000')
        assert result.height == Decimal('0.3000')
        assert result.mass == Decimal('0.4000000')
    finally:
        # Release the connection even if an assertion above fails.
        session.close()
def test_add_tube_that_already_exists(exp_acc, tube_spec):  # noqa: D103
    """Re-adding an identical tube returns the existing id (idempotent)."""
    # The previous test in this module already inserted this tube (the suite
    # runs sequentially); insert once more to make the precondition explicit.
    exp_acc._add_tube(tube_spec)
    # A duplicate insert must not create a new row.
    assert exp_acc._add_tube(tube_spec) == 1
# _add_setting ---------------------------------------------------------------
def test_add_setting_that_does_not_exist(exp_acc, setting_spec):  # noqa: D103
    # Act --------------------------------------------------------------------
    setting_id = exp_acc._add_setting(setting_spec)
    # Assert -----------------------------------------------------------------
    # First insert into an empty table receives primary key 1.
    assert setting_id == 1
    # Now query result -------------------------------------------------------
    session = exp_acc.Session()
    try:
        query = session.query(Setting)
        query = query.filter(Setting.pressure == setting_spec.pressure)
        result = query.one()
        session.commit()
        # Remaining columns persisted with the expected precision.
        assert result.duty == Decimal('0.0')
        assert result.temperature == Decimal('290.0')
        assert result.time_step == Decimal('1.0')
    finally:
        # Release the connection even if an assertion above fails.
        session.close()
def test_add_setting_that_already_exists(exp_acc, setting_spec):  # noqa: D103
    """Re-adding an identical setting returns the existing id (idempotent)."""
    # The previous test already inserted this setting (sequential suite);
    # insert once more to make the duplicate precondition explicit.
    exp_acc._add_setting(setting_spec)
    # A duplicate insert must not create a new row.
    assert exp_acc._add_setting(setting_spec) == 1
# _add_experiment ------------------------------------------------------------
def test_add_experiment_that_does_not_exist(exp_acc, experiment_spec):  # noqa: D103
    # Act --------------------------------------------------------------------
    # Link the new experiment to the setting inserted by earlier tests.
    setting_id = 1
    experiment_id = exp_acc._add_experiment(experiment_spec, setting_id)
    # Assert -----------------------------------------------------------------
    assert experiment_id == 1
    # Now query result -------------------------------------------------------
    session = exp_acc.Session()
    try:
        query = session.query(Experiment)
        query = query.filter(Experiment.datetime == experiment_spec.datetime)
        result = query.one()
        session.commit()
        # Scalar columns and both foreign keys persisted correctly.
        assert result.author == 'RHI'
        assert result.description == 'The description is descriptive.'
        assert result.tube_id == 1
        assert result.setting_id == 1
    finally:
        # Release the connection even if an assertion above fails.
        session.close()
def test_add_experiment_that_already_exists(exp_acc, experiment_spec):  # noqa: D103
    """Re-adding an identical experiment returns the existing id."""
    setting_id = 1
    # The experiment was inserted by the previous (sequential) test; adding
    # it again makes the duplicate precondition explicit.
    exp_acc._add_experiment(experiment_spec, setting_id)
    # A duplicate insert must not create a new row.
    assert exp_acc._add_experiment(experiment_spec, setting_id) == 1
# _add_observations ----------------------------------------------------------
def test_add_observations_that_do_not_exist(exp_acc, observation_spec):  # noqa: D103
    # Arrange ----------------------------------------------------------------
    experiment_id = 1
    # Act --------------------------------------------------------------------
    returned_dict = exp_acc._add_observations(observation_spec, experiment_id)
    # Assert -----------------------------------------------------------------
    # Two observation rows, each carrying three thermocouple readings.
    assert returned_dict == dict(observations=2, temperatures=6)
    # Now query result -------------------------------------------------------
    session = exp_acc.Session()
    try:
        query = session.query(Observation)
        query = query.filter(Observation.experiment_id == experiment_id)
        observations = query.all()
        # Each row is checked against the values from the observation spec.
        for observation in observations:
            if observation.idx == 0:
                assert observation.cap_man_ok
                assert observation.dew_point == Decimal('280.12')
                assert observation.idx == 0
                assert observation.mass == Decimal('0.1234567')
                assert observation.optidew_ok
                assert observation.pow_out == 0
                assert observation.pow_ref == 0
                assert observation.pressure == 987654
                assert observation.surface_temp == Decimal('290.0')
                assert observation.ic_temp == Decimal('291.00')
            elif observation.idx == 1:
                assert not observation.cap_man_ok
                assert observation.dew_point == Decimal('280.20')
                assert observation.idx == 1
                assert observation.mass == Decimal('0.1222222')
                assert not observation.optidew_ok
                assert observation.pow_out == 0
                assert observation.pow_ref == 0
                assert observation.pressure == 987000
                assert observation.surface_temp == Decimal('290.2')
                assert observation.ic_temp == Decimal('291.20')
        query = session.query(Temperature)
        temperatures = query.filter(Temperature.experiment_id == experiment_id)
        # Temperatures are keyed by (idx, thermocouple_num).
        for temperature in temperatures:
            if temperature.idx == 0:
                if temperature.thermocouple_num == 0:
                    assert temperature.temperature == Decimal('300.0')
                elif temperature.thermocouple_num == 1:
                    assert temperature.temperature == Decimal('300.2')
                elif temperature.thermocouple_num == 2:
                    assert temperature.temperature == Decimal('300.4')
            elif temperature.idx == 1:
                if temperature.thermocouple_num == 0:
                    assert temperature.temperature == Decimal('301.0')
                elif temperature.thermocouple_num == 1:
                    assert temperature.temperature == Decimal('301.2')
                elif temperature.thermocouple_num == 2:
                    assert temperature.temperature == Decimal('301.4')
        session.commit()
    finally:
        # Release the connection even if an assertion above fails.
        session.close()
def test_add_observations_that_already_exist(exp_acc, observation_spec):  # noqa: D103
    """Duplicate observation inserts report the same row counts."""
    # The rows were inserted by the preceding sequential test; inserting
    # again must be idempotent.
    counts = exp_acc._add_observations(observation_spec, 1)
    assert counts == dict(observations=2, temperatures=6)
# add_raw_data ---------------------------------------------------------------
@pytest.mark.parametrize('tube_id', [1, 2])
def test_add_raw_data(exp_acc, data_spec, tube_id, monkeypatch):  # noqa: D103
    # Arrange ----------------------------------------------------------------
    # NOTE: The tests above have already added this to the database for
    # tube_id == 1, but not for tube_id == 2.
    # Rebuild the (frozen) dataclass spec with the parametrized tube_id.
    changes = dict(tube_id=tube_id)
    experimental_spec = dataclasses.replace(data_spec.experiment, **changes)
    changes = dict(experiment=experimental_spec)
    data_spec = dataclasses.replace(data_spec, **changes)
    # If the tube_id == 2, then we want to ask the user for that info.
    # This requires a mock.
    # Canned answers: inner/outer diameter, height, mass, material.
    user_input = [['0.1111'], ['0.2222'], ['0.3333'], ['0.4444'],
                  ['test_material']]
    mock_io = MagicMock(side_effect=user_input)
    monkeypatch.setattr(
        'coimbra_chamber.utility.io.service.IOUtility.get_input',
        mock_io)
    # Act --------------------------------------------------------------------
    result = exp_acc.add_raw_data(data_spec)
    # Assert -----------------------------------------------------------------
    if tube_id == 1:
        assert result['tube_id'] == 1
        assert result['setting_id'] == 1
        assert result['experiment_id'] == 1
        assert result['observations'] == 2
        assert result['temperatures'] == 6
    else:
        # A previously unseen tube is created from the mocked user input.
        assert result['tube_id'] == 2
        assert result['setting_id'] == 1
        assert result['experiment_id'] == 1
        assert result['observations'] == 2
        assert result['temperatures'] == 6
# connect --------------------------------------------------------------------
@pytest.mark.parametrize('filepath', [tdms_path, 'bad_path'])
def test_connect(exp_acc, filepath):  # noqa: D103
    """_connect loads the tdms file plus settings and data frames."""
    # Act --------------------------------------------------------------------
    exp_acc._connect(filepath)
    # Assert -----------------------------------------------------------------
    # NOTE(review): both parametrized values are non-empty strings, so this
    # condition is always true and the else branch never runs -- the intent
    # was probably `filepath == tdms_path`; confirm before tightening.
    if filepath:
        assert isinstance(exp_acc._tdms_file, TdmsFile)
        assert isinstance(exp_acc._settings, DataFrame)
        assert isinstance(exp_acc._data, DataFrame)
    else:
        # Fixed: hasattr() requires (object, name); the original one-argument
        # call `hasattr(exp_acc._tdms_file)` would have raised TypeError had
        # this branch ever executed.
        assert not hasattr(exp_acc, '_tdms_file')
# get_temperature_spec -------------------------------------------------------
@pytest.mark.parametrize('index', [0, 1, 2])
def test_get_temperature_spec(exp_acc, index):  # noqa: D103
    # Arrange ----------------------------------------------------------------
    exp_acc._connect(tdms_path)
    # Act --------------------------------------------------------------------
    results = exp_acc._get_temperature_specs(index)
    # Assert -----------------------------------------------------------------
    # Each reading is matched against the known contents of the test tdms
    # file, keyed by (sample index, thermocouple number).
    for temp_spec in results:
        assert isinstance(temp_spec, TemperatureSpec)
        tc_num = temp_spec.thermocouple_num
        if index == 0:
            if tc_num == 4:
                assert temp_spec.temperature == Decimal('290.21')
            elif tc_num == 5:
                assert temp_spec.temperature == Decimal('289.9')
            elif tc_num == 6:
                assert temp_spec.temperature == Decimal('289.88')
            elif tc_num == 7:
                assert temp_spec.temperature == Decimal('290.21')
            elif tc_num == 8:
                assert temp_spec.temperature == Decimal('290.21')
            elif tc_num == 9:
                assert temp_spec.temperature == Decimal('289.82')
            elif tc_num == 10:
                assert temp_spec.temperature == Decimal('289.72')
            elif tc_num == 11:
                assert temp_spec.temperature == Decimal('289.91')
            elif tc_num == 12:
                assert temp_spec.temperature == Decimal('289.7')
            else:  # tc_num must be 13
                assert temp_spec.temperature == Decimal('290.1')
        elif index == 1:
            if tc_num == 4:
                assert temp_spec.temperature == Decimal('290.23')
            elif tc_num == 5:
                assert temp_spec.temperature == Decimal('289.9')
            elif tc_num == 6:
                assert temp_spec.temperature == Decimal('289.89')
            elif tc_num == 7:
                assert temp_spec.temperature == Decimal('290.23')
            elif tc_num == 8:
                assert temp_spec.temperature == Decimal('290.22')
            elif tc_num == 9:
                assert temp_spec.temperature == Decimal('289.83')
            elif tc_num == 10:
                assert temp_spec.temperature == Decimal('289.73')
            elif tc_num == 11:
                assert temp_spec.temperature == Decimal('289.92')
            elif tc_num == 12:
                assert temp_spec.temperature == Decimal('289.72')
            else:  # tc_num must be 13
                assert temp_spec.temperature == Decimal('290.11')
        else:  # index must be 2
            if tc_num == 4:
                assert temp_spec.temperature == Decimal('290.23')
            elif tc_num == 5:
                assert temp_spec.temperature == Decimal('289.91')
            elif tc_num == 6:
                assert temp_spec.temperature == Decimal('289.9')
            elif tc_num == 7:
                assert temp_spec.temperature == Decimal('290.23')
            elif tc_num == 8:
                assert temp_spec.temperature == Decimal('290.23')
            elif tc_num == 9:
                assert temp_spec.temperature == Decimal('289.84')
            elif tc_num == 10:
                assert temp_spec.temperature == Decimal('289.74')
            elif tc_num == 11:
                assert temp_spec.temperature == Decimal('289.93')
            elif tc_num == 12:
                assert temp_spec.temperature == Decimal('289.73')
            else:  # tc_num must be 13
                assert temp_spec.temperature == Decimal('290.11')
# get_observation_spec -------------------------------------------------------
@pytest.mark.parametrize('index', [0, 1, 2])
def test_get_observation_sepc(exp_acc, index):  # noqa: D103
    # NOTE: 'sepc' is a typo for 'spec'; the name is kept so pytest history
    # and any node-id references stay stable.
    # Arrange ----------------------------------------------------------------
    exp_acc._connect(tdms_path)
    # Act --------------------------------------------------------------------
    results = exp_acc._get_observation_specs(index)
    # Assert -----------------------------------------------------------------
    for temp_spec in results.temperatures:
        assert isinstance(temp_spec, TemperatureSpec)
    # Scalar fields are matched against the known contents of the test tdms
    # file; note idx is one-based (index + 1).
    if index == 0:
        assert results.cap_man_ok is True
        assert results.dew_point == Decimal('284.29')
        assert results.idx == 1
        assert results.mass == Decimal('0.0129683')
        assert results.optidew_ok is True
        assert results.pow_out == Decimal('-0.0012')
        assert results.pow_ref == Decimal('-0.0015')
        assert results.pressure == 99732
        assert results.surface_temp == Decimal('291.34')
        assert results.ic_temp == Decimal('294.86')
    elif index == 1:
        assert results.cap_man_ok is True
        assert results.dew_point == Decimal('284.3')
        assert results.idx == 2
        assert results.mass == Decimal('0.0129682')
        assert results.optidew_ok is True
        assert results.pow_out == Decimal('-0.0011')
        assert results.pow_ref == Decimal('-0.0015')
        assert results.pressure == 99749
        assert results.surface_temp == Decimal('291.3')
        assert results.ic_temp == Decimal('294.86')
    else:  # index must be 2
        assert results.cap_man_ok is True
        assert results.dew_point == Decimal('284.3')
        assert results.idx == 3
        assert results.mass == Decimal('0.0129682')
        assert results.optidew_ok is True
        assert results.pow_out == Decimal('-0.0011')
        assert results.pow_ref == Decimal('-0.0016')
        assert results.pressure == 99727
        assert results.surface_temp == Decimal('291.22')
        assert results.ic_temp == Decimal('294.86')
# get_experiment_spec --------------------------------------------------------
def test_get_experiment_spec(exp_acc):  # noqa: D103
    """The experiment spec is parsed from the tdms file's metadata."""
    exp_acc._connect(tdms_path)
    spec = exp_acc._get_experiment_specs()
    # Values come straight from the test tdms file's properties.
    assert spec.author == 'RHI'
    expected_dt = datetime.datetime(
        2019, 6, 1, 17, 56, 34, 399828, tzinfo=utc)
    assert spec.datetime == expected_dt
    assert spec.description == 'Test description 1.'
    assert spec.tube_id == 1
# get_setting_spec -----------------------------------------------------------
def test_get_setting_spec(exp_acc):  # noqa: D103
    """The setting spec is parsed from the tdms file's settings group."""
    exp_acc._connect(tdms_path)
    spec = exp_acc._get_setting_specs()
    # Compare all four fields at once against the known file contents.
    observed = (spec.duty, spec.pressure, spec.temperature, spec.time_step)
    expected = (Decimal('0.0'), int(1e5), Decimal('290'), Decimal('1.0'))
    assert observed == expected
# get_raw_data ---------------------------------------------------------------
def test_get_raw_data(exp_acc):  # noqa: D103
    """get_raw_data bundles setting, experiment and observation specs."""
    raw = exp_acc.get_raw_data(tdms_path)
    # Spot-check one value from each section of the returned data spec.
    assert raw.setting.duty == Decimal('0')
    assert raw.experiment.datetime == datetime.datetime(
        2019, 6, 1, 17, 56, 34, 399828, tzinfo=utc)
    first_obs = raw.observations[0]
    assert first_obs.pressure == 99732
    assert first_obs.temperatures[0].thermocouple_num == 4
    # Three observations, each carrying ten thermocouple readings.
    assert len(raw.observations) == 3
    assert len(first_obs.temperatures) == 10
# layout_raw_data ------------------------------------------------------------
def test_layout_raw_data(exp_acc, raw_layout):  # noqa: D103
    """layout_raw_data reproduces the expected plot layout."""
    layout = exp_acc.layout_raw_data(exp_acc.get_raw_data(tdms_path))
    assert layout.style == raw_layout.style
    # plots[0]: mass and temperature; plots[1]: pressure.
    assert layout.plots[0] == raw_layout.plots[0]
    assert layout.plots[1] == raw_layout.plots[1]
# add_fit --------------------------------------------------------------------
def test_add_fit_that_does_not_exist(exp_acc, fit_spec):  # noqa: D103
    # Arrange ----------------------------------------------------------------
    expected_experiment_id = 1
    expected_idx = fit_spec.idx
    # Act --------------------------------------------------------------------
    exp_id, idx = exp_acc.add_fit(fit_spec, expected_experiment_id)
    # Assert -----------------------------------------------------------------
    assert exp_id == expected_experiment_id
    assert idx == expected_idx
    # Now query result and check every persisted column -----------------------
    expected_columns = dict(
        a=1.0, sig_a=2.0, b=-3.0, sig_b=4.0, r2=5.0, q=6.0, chi2=7.0,
        nu_chi=8, experiment_id=1, idx=0, mddp=9, sig_mddp=9.1,
        x1s=10, sig_x1s=10.1, x1e=11, sig_x1e=11.1, x1=12, sig_x1=12.1,
        m1s=13, sig_m1s=13.1, m1e=14, sig_m1e=14.1, m1=15, sig_m1=15.1,
        rhos=16, sig_rhos=16.1, rhoe=17, sig_rhoe=17.1, rho=18, sig_rho=18.1,
        Bm1=19, sig_Bm1=19.1, T=20, sig_T=20.1, D12=21, sig_D12=21.1,
        hfg=22, sig_hfg=22.1, hu=23, sig_hu=23.1, hs=24, sig_hs=24.1,
        cpv=25, sig_cpv=25.1, he=26, sig_he=26.1, cpl=27, sig_cpl=27.1,
        hT=28, sig_hT=28.1, qcu=29, sig_qcu=29.1, Ebe=30, sig_Ebe=30.1,
        Ebs=31, sig_Ebs=31.1, qrs=32, sig_qrs=32.1, kv=33, sig_kv=33.1,
        alpha=34, sig_alpha=34.1, Bh=35, sig_Bh=35.1, M=36, sig_M=36.1,
        gamma1=37, sig_gamma1=37.1, beta=38, sig_beta=38.1,
        Delta_m=39, sig_Delta_m=39.1, Delta_T=40, sig_Delta_T=40.1,
        mu=41, sig_mu=41.1, nu=42, sig_nu=42.1, gamma2=43, sig_gamma2=43.1,
        ShR=44.0, sig_ShR=44.1, NuR=45.0, sig_NuR=45.1, Le=46.0, sig_Le=46.1,
        GrR_binary=47.0, sig_GrR_binary=47.1,
        GrR_primary=48.0, sig_GrR_primary=48.1, Ts=49.0, sig_Ts=49.1)
    session = exp_acc.Session()
    try:
        result = session.query(Fit).filter(
            and_(Fit.idx == expected_idx, Fit.experiment_id == exp_id)
        ).one()
        session.commit()
        for column, value in expected_columns.items():
            assert getattr(result, column) == value
    finally:
        session.close()
def test_add_fit_that_already_exists(exp_acc, fit_spec):  # noqa: D103
    # Arrange ----------------------------------------------------------------
    # NOTE: The test above already added the fit.
    # NOTE: These tests are intended to be run sequentially.
    expected = (1, fit_spec.idx)
    # Act --------------------------------------------------------------------
    new_exp_id, new_idx = exp_acc.add_fit(fit_spec, expected[0])
    # Assert -----------------------------------------------------------------
    # Re-adding an existing fit returns the same (experiment_id, idx) pair.
    assert (new_exp_id, new_idx) == expected
| mit |
tmrowco/electricitymap | parsers/AU.py | 1 | 22469 | #!/usr/bin/env python3
import json
# The arrow library is used to handle datetimes
import arrow
import numpy as np
import pandas as pd
# The request library is used to fetch content through HTTP
import requests
from .lib import AU_battery, AU_solar
# Maps AEMO "Fuel Source - Descriptor" strings to electricitymap production
# modes. Several spelling variants observed in the feed are listed explicitly.
AMEO_CATEGORY_DICTIONARY = {
    'Bagasse': 'biomass',
    'Black Coal': 'coal',
    'Brown Coal': 'coal',
    'coal': 'coal',
    'Coal Seam Methane': 'gas',
    'Diesel': 'oil',
    'gas': 'gas',
    'hydro': 'hydro',
    'Hydro': 'hydro',
    'Kerosene': 'oil',
    'Landfill / Biogas': 'biomass',
    'Landfill / Biogass': 'biomass',
    'Landfill Gas': 'biomass',
    'Landfill Methane / Landfill Gas': 'biomass',
    'Landfill, Biogas': 'biomass',
    'Macadamia Nut Shells': 'biomass',
    'Natural Gas': 'gas',
    'Natural Gas / Diesel': 'gas',
    'Natural Gas / Fuel Oil': 'gas',
    'oil': 'oil',
    'Sewerage/Waste Water': 'biomass',
    'Solar': 'solar',
    'Solar PV': 'solar',
    'Waste Coal Mine Gas': 'gas',
    'Waste Water / Sewerage': 'biomass',
    'Water': 'hydro',
    'Wind': 'wind'
}
# Maps AEMO station names to electricitymap zone keys. ``None`` marks closed
# plants and interconnectors that must not be attributed to any state.
AMEO_LOCATION_DICTIONARY = {
    'Hallett Power Station': 'AUS-SA',
    'Somerton Power Station': 'AUS-VIC',
    'AGL SITA Landfill1 Kemps Creek': 'AUS-NSW',
    'Angaston Power Station': 'AUS-SA',
    'Ararat Wind Farm': 'AUS-VIC',
    'Awaba Power Station': 'AUS-NSW',
    'Bald Hills Wind Farm': 'AUS-VIC',
    'Bankstown Sports Club Plant Units': 'AUS-NSW',
    'Banimboola Power Station': 'AUS-VIC',
    'Barcaldine Power Station': 'AUS-QLD',
    'Barcaldine Solar Farm': 'AUS-QLD',
    'Barron Gorge Power Station': 'AUS-QLD',
    'Basslink HVDC Link': None,
    'Bastyan Power Station': 'AUS-TAS',
    'Ballarat Base Hospital Plant': 'AUS-VIC',
    'Bell Bay Three Power Station': 'AUS-TAS',
    'Bairnsdale Power Station': 'AUS-VIC',
    'Burrendong Hydro Power Station': 'AUS-NSW',
    'Blowering Power Station': 'AUS-NSW',
    'The Bluff Wind Farm': 'AUS-SA',
    'Blue Lake Milling Power Plant': 'AUS-SA',
    'Boco Rock Wind Farm': 'AUS-NSW',
    'SA Water Bolivar Waste Water Treatment (WWT) Plant': 'AUS-SA',
    'Browns Plains Land Fill Gas Power Station': 'AUS-QLD',
    'Braemar Power Station': 'AUS-QLD',
    'Braemar 2 Power Station': 'AUS-QLD',
    'Broadmeadows Landfill Gas Power Station': 'AUS-VIC',
    'Broken Hill Solar Plant': 'AUS-NSW',
    'Brooklyn Landfill Gas Power Station': 'AUS-VIC',
    'Brown Mountain': 'AUS-NSW',
    'Burrinjuck Power Station': 'AUS-NSW',
    'Butlers Gorge Power Station': 'AUS-TAS',
    'Bayswater Power Station': 'AUS-NSW',
    'Broadwater Power Station Units 1 and 2': 'AUS-NSW',
    'Callide A Power Station': 'AUS-QLD',
    'Callide B Power Station': 'AUS-QLD',
    'Capital Wind Farm': 'AUS-NSW',
    'Catagunya Diesel Generation ': 'AUS-TAS',
    'Cathedral Rocks Wind Farm': 'AUS-SA',
    'Coonooer Bridge Wind Farm': 'AUS-VIC',
    'Capital East Solar Farm': 'AUS-NSW',
    'Cethana Power Station': 'AUS-TAS',
    'Colongra Power Station': 'AUS-NSW',
    'Challicum Hills Wind Farm': 'AUS-VIC',
    'Claytons Landfill Gas Power Station': 'AUS-VIC',
    'Clements Gap Wind Farm': 'AUS-SA',
    'Cluny Power Station': 'AUS-TAS',
    'Codrington Wind Farm': 'AUS-VIC',
    'Condong Power Station Unit 1': 'AUS-NSW',
    'Copeton Hydro Power Station': 'AUS-NSW',
    'Corio Landfill Gas Power Station': 'AUS-VIC',
    'Callide Power Plant': 'AUS-QLD',
    'Condamine Power Station A': 'AUS-QLD',
    'Cullerin Range Wind Farm': 'AUS-NSW',
    'Daandine Power Station': 'AUS-QLD',
    'Dartmouth Power Station': 'AUS-VIC',
    'Darling Downs Power Station': 'AUS-QLD',
    'Devils Gate Power Station': 'AUS-TAS',
    'Dry Creek Gas Turbine Station': 'AUS-SA',
    'Eastern Creek Power Station': 'AUS-NSW',
    'Eildon Pondage Hydro Power Station': 'AUS-VIC',
    'Eildon Power Station': 'AUS-VIC',
    'Eraring Power Station': 'AUS-NSW',
    'Fisher Power Station': 'AUS-TAS',
    'Broken Hill Gas Turbines': 'AUS-NSW',
    'George Town Diesel Generation': 'AUS-TAS',
    'German Creek Power Station': 'AUS-QLD',
    'Glenbawn Hydro Power Station': 'AUS-NSW',
    'Glenmaggie Hydro Power Station': 'AUS-VIC',
    'Gordon Power Station': 'AUS-TAS',
    'Grange Avenue Power Station, Grange Avenue Landfill Gas Power Station': 'AUS-NSW',
    'Gladstone Power Station': 'AUS-QLD',
    'Gullen Range Wind Farm': 'AUS-NSW',
    'Gunning Wind Farm': 'AUS-NSW',
    'Guthega Power Station': 'AUS-NSW',
    'Hallam Road Renewable Energy Facility': 'AUS-VIC',
    'Hallett 1 Wind Farm': 'AUS-SA',
    'Hallett 2 Wind Farm': 'AUS-SA',
    'Hepburn Wind Farm': 'AUS-VIC',
    'Hornsdale Wind Farm': 'AUS-SA',
    'Hornsdale Wind Farm 2': 'AUS-SA',
    'Hunter Economic Zone': 'AUS-NSW',
    'Highbury Landfill Gas Power Station Unit 1': 'AUS-SA',
    'Hume Power Station': 'AUS-NSW',
    'Hunter Valley Gas Turbine': 'AUS-NSW',
    'Hazelwood Power Station': None,  # Closed
    'ISIS Central Sugar Mill Co-generation Plant': 'AUS-QLD',
    'Invicta Sugar Mill': 'AUS-QLD',
    'Jacks Gully Landfill Gas Power Station': 'AUS-NSW',
    'John Butters Power Station': 'AUS-TAS',
    'Jeeralang "A" Power Station': 'AUS-VIC',
    'Jeeralang "B" Power Station': 'AUS-VIC',
    'Jindabyne Small Hydro Power Station': 'AUS-NSW',
    'Jounama Small Hydro Power Station': 'AUS-NSW',
    'Kareeya Power Station': 'AUS-QLD',
    'Keepit Power Station': 'AUS-NSW',
    'Kincumber Landfill Site': 'AUS-NSW',
    'Kogan Creek Power Station': 'AUS-QLD',
    'Ladbroke Grove Power Station': 'AUS-SA',
    'Liddell Power Station': 'AUS-NSW',
    'Lemonthyme / Wilmot Power Station': 'AUS-TAS',
    'Catagunya / Liapootah / Wayatinah Power Station': 'AUS-TAS',
    'Lake Bonney Wind Farm': 'AUS-SA',
    'Lake Bonney Stage 2 Windfarm': 'AUS-SA',
    'Lake Bonney Stage 3 Wind Farm': 'AUS-SA',
    'Lake Echo Power Station': 'AUS-TAS',
    'Laverton North Power Station': 'AUS-VIC',
    'Longford Plant': 'AUS-VIC',
    'Lonsdale Power Station': 'AUS-SA',
    'Loy Yang B Power Station': 'AUS-VIC',
    'Lucas Heights 2 LFG Power Station': 'AUS-NSW',
    'Loy Yang A Power Station': 'AUS-VIC',
    'Macarthur Wind Farm': 'AUS-VIC',
    'Mackay Gas Turbine': 'AUS-QLD',
    'Mackintosh Power Station': 'AUS-TAS',
    'Moranbah North Power Station, Grosvenor 1 Waste Coal Mine Gas Power Station': 'AUS-QLD',
    'Bogong / Mckay Power Station': 'AUS-VIC',
    'Meadowbank Diesel Generation': 'AUS-TAS',
    'Meadowbank Power Station': 'AUS-TAS',
    'Mt Mercer Wind Farm': 'AUS-VIC',
    'Midlands Power Station': 'AUS-TAS',
    'Mintaro Gas Turbine Station': 'AUS-SA',
    'Mortons Lane Wind Farm': 'AUS-VIC',
    'Moranbah Generation Project': 'AUS-QLD',
    'Moree Solar Farm': 'AUS-NSW',
    'Mornington Waste Disposal Facility': 'AUS-VIC',
    'Mortlake Power Station Units': 'AUS-VIC',
    'Mt Piper Power Station': 'AUS-NSW',
    'Millmerran Power Plant': 'AUS-QLD',
    'Mt Stuart Power Station': 'AUS-QLD',
    'Mt Millar Wind Farm': 'AUS-SA',
    'Mugga Lane Solar Park': 'AUS-NSW',
    'Murray 1 Power Station, Murray 2 Power Station': 'AUS-NSW',
    'Musselroe Wind Farm': 'AUS-TAS',
    'North Brown Hill Wind Farm': 'AUS-SA',
    'Nine Network Willoughby Plant': 'AUS-NSW',
    'Newport Power Station': 'AUS-VIC',
    'Northern Power Station': None,  # Closed
    'Nyngan Solar Plant': 'AUS-NSW',
    'Oakey Power Station': 'AUS-QLD',
    'Oaklands Hill Wind Farm': 'AUS-VIC',
    'Oaky Creek 2 Waste Coal Mine Gas Power Station Units 1-15': 'AUS-QLD',
    'Oaky Creek Power Station': 'AUS-QLD',
    'Osborne Power Station': 'AUS-SA',
    'Paloona Power Station': 'AUS-TAS',
    'Pedler Creek Landfill Gas Power Station Units 1-3': 'AUS-SA',
    'Pindari Hydro Power Station': 'AUS-NSW',
    'Playford B Power Station': None,  # Closed
    'Poatina Power Station': 'AUS-TAS',
    'Port Lincoln Gas Turbine': 'AUS-SA',
    'Port Latta Diesel Generation': 'AUS-TAS',
    'Portland Wind Farm': 'AUS-VIC',
    'Pelican Point Power Station': 'AUS-SA',
    'Port Stanvac Power Station 1': 'AUS-SA',
    'Wivenhoe Power Station No. 1 Pump': 'AUS-QLD',
    'Wivenhoe Power Station No. 2 Pump': 'AUS-QLD',
    'Quarantine Power Station': 'AUS-SA',
    'Reece Power Station': 'AUS-TAS',
    'Remount Power Station': 'AUS-TAS',
    'Repulse Power Station': 'AUS-TAS',
    'Rochedale Renewable Energy Facility': 'AUS-QLD',
    'Roghan Road LFG Power Plant': 'AUS-QLD',
    'Roma Gas Turbine Station': 'AUS-QLD',
    'Rowallan Power Station': 'AUS-TAS',
    'Royalla Solar Farm': 'AUS-NSW',
    'Rocky Point Cogeneration Plant': 'AUS-QLD',
    'Shepparton Wastewater Treatment Facility': 'AUS-VIC',
    'Bendeela / Kangaroo Valley Power Station': 'AUS-NSW',
    'Bendeela / Kangaroo Valley Pumps': 'AUS-NSW',
    'Smithfield Energy Facility': 'AUS-NSW',
    'South East Water - Hallam Hydro Plant': 'AUS-VIC',
    'Snowtown Wind Farm Stage 2 North': 'AUS-SA',
    'Snowtown South Wind Farm': 'AUS-SA',
    'Snowtown Wind Farm Units 1 to 47': 'AUS-SA',
    'Snuggery Power Station': 'AUS-SA',
    'Stanwell Power Station': 'AUS-QLD',
    'Starfish Hill Wind Farm': 'AUS-SA',
    'St George Leagues Club Plant': 'AUS-NSW',
    'Southbank Institute Of Technology Unit 1 Plant': 'AUS-QLD',
    'Suncoast Gold Macadamias': 'AUS-QLD',
    'Springvale Landfill Gas Power Station': 'AUS-NSW',
    'Swanbank E Gas Turbine': 'AUS-QLD',
    'Tallawarra Power Station': 'AUS-NSW',
    'Taralga Wind Farm': 'AUS-NSW',
    'Tarong Power Station': 'AUS-QLD',
    'Tarraleah Power Station': 'AUS-TAS',
    'Tatiara Bordertown Plant': 'AUS-SA',
    'Tatura Biomass Generator': 'AUS-VIC',
    'Tea Tree Gully Landfill Gas Power Station Unit 1': 'AUS-SA',
    'Teralba Power Station': 'AUS-NSW',
    'Terminal Storage Mini Hydro Power Station': 'AUS-SA',
    'Taralgon Network Support Station': 'AUS-VIC',
    'The Drop Hydro Unit 1': 'AUS-NSW',
    'Veolia Ti Tree Bio Reactor': 'AUS-QLD',
    'Tarong North Power Station': 'AUS-QLD',
    'Toora Wind Farm': 'AUS-VIC',
    'Torrens Island Power Station "A"': 'AUS-SA',
    'Torrens Island Power Station "B"': 'AUS-SA',
    'Trevallyn Power Station': 'AUS-TAS',
    'Tribute Power Station': 'AUS-TAS',
    'Tumut 3 Power Station': 'AUS-NSW',
    'Tungatinah Power Station': 'AUS-TAS',
    'Tamar Valley Combined Cycle Power Station': 'AUS-TAS',
    'Tamar Valley Peaking Power Station': 'AUS-TAS',
    'Tumut 1 Power Station, Tumut 2 Power Station': 'AUS-NSW',
    'Uranquinty Power Station': 'AUS-NSW',
    'Vales Point "B" Power Station': 'AUS-NSW',
    'Valley Power Peaking Facility': 'AUS-VIC',
    'Wivenhoe Power Station': 'AUS-NSW',
    'Waterloo Wind Farm': 'AUS-SA',
    'Waubra Wind Farm': 'AUS-VIC',
    'Woodlawn Bioreactor Energy Generation Station': 'AUS-NSW',
    'Western Suburbs League Club (Campbelltown) Plant': 'AUS-NSW',
    'West Illawarra Leagues Club Plant': 'AUS-NSW',
    'Windy Hill Wind Farm': 'AUS-QLD',
    'Whitwood Road Renewable Energy Facility': 'AUS-QLD',
    'Wilga Park A Power Station': 'AUS-VIC',
    'Wilga Park B Power Station': 'AUS-VIC',
    'William Hovell Hydro Power Station': 'AUS-VIC',
    'Wingfield 1 Landfill Gas Power Station Units 1-4': 'AUS-SA',
    'Wingfield 2 Landfill Gas Power Station Units 1-4': 'AUS-SA',
    'West Kiewa Power Station': 'AUS-VIC',
    'West Nowra Landfill Gas Power Generation Facility': 'AUS-NSW',
    'Wollert Renewable Energy Facility': 'AUS-SA',
    'Wonthaggi Wind Farm': 'AUS-VIC',
    'Woodlawn Wind Farm': 'AUS-NSW',
    'Woolnorth Studland Bay / Bluff Point Wind Farm': 'AUS-TAS',
    'Woy Woy Landfill Site': 'AUS-NSW',
    'Wattle Point Wind Farm': 'AUS-VIC',
    'Wyangala A Power Station': 'AUS-NSW',
    'Wyangala B Power Station': 'AUS-NSW',
    'Wyndham Waste Disposal Facility': 'AUS-VIC',
    'Townsville Gas Turbine': 'AUS-QLD',
    'Yambuk Wind Farm': 'AUS-VIC',
    'Yarwun Power Station': 'AUS-QLD',
    'Yallourn W Power Station': 'AUS-VIC',
}
# Per-station overrides consulted when the fuel-source descriptor alone does
# not determine the mode (HVDC links, stations with missing descriptors).
AMEO_STATION_DICTIONARY = {
    'Basslink HVDC Link': 'Import / Export',
    # 'Bendeela / Kangaroo Valley Pumps': 'storage',
    # 'Rocky Point Cogeneration Plant': 'storage',
    # 'Wivenhoe Power Station No. 1 Pump': 'storage',
    # 'Wivenhoe Power Station No. 2 Pump': 'storage',
    'Yarwun Power Station': 'coal'
}
def fetch_production(zone_key=None, session=None, target_datetime=None, logger=None):
    """Request the last known production mix (in MW) of one Australian zone.

    Arguments:
    zone_key (optional) -- zone identifier, e.g. 'AUS-NSW'
    session (optional) -- request session passed in order to re-use an existing session
    target_datetime (optional) -- not supported; only live data can be fetched
    logger (optional) -- logger used to report unknown stations / fuel sources

    Return:
    A dictionary in the form:
    {
      'zoneKey': 'FR',
      'datetime': '2017-01-01T00:00:00Z',
      'production': {
          'biomass': 0.0, 'coal': 0.0, 'gas': 0.0, 'hydro': 0.0,
          'nuclear': null, 'oil': 0.0, 'solar': 0.0, 'wind': 0.0,
          'geothermal': 0.0, 'unknown': 0.0
      },
      'storage': {'hydro': -10.0},
      'source': 'mysource.com'
    }
    """
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')
    url = 'http://services.aremi.nationalmap.gov.au/aemo/v3/csv/all'
    df = pd.read_csv(url)
    data = {
        'zoneKey': zone_key,
        # Pre-seed modes that should always be reported, even at zero output.
        'capacity': {
            'coal': 0,
            'geothermal': 0,
            'hydro': 0,
            'nuclear': 0,
        },
        'production': {
            'coal': 0,
            'geothermal': 0,
            'hydro': 0,
            'nuclear': 0
        },
        'storage': {},
        'source': 'aremi.nationalmap.gov.au, pv-map.apvi.org.au',
    }
    # It's possible that the csv sometimes contains several timestamps.
    # https://github.com/tmrowco/electricitymap/issues/704
    # Find the latest timestamp.
    timestamps = df['Most Recent Output Time (AEST)'].dropna().values
    latest_ts = max([arrow.get(x) for x in timestamps.tolist()])
    data['datetime'] = latest_ts.datetime
    for rowIndex, row in df.iterrows():
        station = row['Station Name']
        fuelsource = row['Fuel Source - Descriptor']
        if station not in AMEO_LOCATION_DICTIONARY:
            logger.warning('WARNING: station %s does not belong to any state' % station)
            continue
        if AMEO_LOCATION_DICTIONARY[station] != zone_key:
            continue
        if row['Most Recent Output Time (AEST)'] == '-':
            continue
        # Map the fuel source to a mode; fall back to the per-station
        # override table for special cases.
        key = (AMEO_CATEGORY_DICTIONARY.get(fuelsource, None) or
               AMEO_STATION_DICTIONARY.get(station, None))
        value = row['Current Output (MW)']
        if np.isnan(value):
            value = 0.0
        else:
            try:
                value = float(row['Current Output (MW)'])
            except ValueError:
                value = 0.0
        if not key:
            # Unrecognized source, ignore
            if value:
                # If it had production, show warning
                logger.warning('WARNING: key {} is not supported, row {}'.format(fuelsource, row))
            continue
        # Skip HVDC links.
        # BUGFIX: the 'Import / Export' override lives in
        # AMEO_STATION_DICTIONARY (keyed by station name); the previous code
        # looked the station up in AMEO_CATEGORY_DICTIONARY (keyed by fuel
        # source), so this check could never match.
        if AMEO_STATION_DICTIONARY.get(station, None) == 'Import / Export':
            continue
        # Disregard substantially negative values, but let slightly negative values through
        if value < -1:
            logger.warning('Skipping %s because production can\'t be negative (%s)' % (station, value))
            continue
        # Parse the datetime and check it matches the most recent one.
        try:
            plant_timestamp = arrow.get(row['Most Recent Output Time (AEST)']).datetime
        except (OSError, ValueError):
            # ignore invalid dates, they might be parsed as NaN
            continue
        else:
            # if plant_timestamp could be parsed successfully,
            # check plant_timestamp equals latest_timestamp and drop plant otherwise
            if plant_timestamp != data['datetime']:
                continue
        # Initialize key in data dictionaries if not set
        if key not in data['production']:
            data['production'][key] = 0.0
        if key not in data['capacity']:
            data['capacity'][key] = 0.0
        data['production'][key] += value
        data['capacity'][key] += float(row['Max Cap (MW)'])
        # Clamp slightly negative aggregates to zero.
        data['production'][key] = max(data['production'][key], 0)
        data['capacity'][key] = max(data['capacity'][key], 0)
    # find distributed solar production and add it in
    session = session or requests.session()
    distributed_solar_production = AU_solar.fetch_solar_for_date(zone_key, data['datetime'],
                                                                 session)
    if distributed_solar_production:
        data['production']['solar'] = (data['production'].get('solar', 0) +
                                       distributed_solar_production)
    if zone_key == 'AUS-SA':
        # Get South Australia battery status.
        data['storage']['battery'] = AU_battery.fetch_SA_battery()
    return data
# It appears that the interconnectors are named according to positive flow.
# That is, NSW1-QLD1 reports positive values when there is flow from NSW to QLD,
# and negative values when flow is from QLD to NSW.
# To verify, compare with flows shown on
# http://aemo.com.au/Electricity/National-Electricity-Market-NEM/Data-dashboard#nem-dispatch-overview
# Each entry: the AEMO region whose summary lists the interconnector(s), the
# interconnector names, and per-interconnector sign factors that orient the
# reported flow to match the sorted 'A->B' zone-key direction.
EXCHANGE_MAPPING_DICTIONARY = {
    'AUS-NSW->AUS-QLD': {
        'region_id': 'QLD1',
        'interconnector_names': ['N-Q-MNSP1', 'NSW1-QLD1'],
        'directions': [1, 1]
    },
    'AUS-NSW->AUS-VIC': {
        'region_id': 'NSW1',
        'interconnector_names': ['VIC1-NSW1'],
        'directions': [-1]
    },
    'AUS-SA->AUS-VIC': {
        'region_id': 'VIC1',
        'interconnector_names': ['V-SA', 'V-S-MNSP1'],
        'directions': [-1, -1]
    },
    'AUS-TAS->AUS-VIC': {
        'region_id': 'VIC1',
        'interconnector_names': ['T-V-MNSP1'],
        'directions': [1]
    },
}
def fetch_exchange(zone_key1=None, zone_key2=None, session=None, target_datetime=None,
                   logger=None):
    """Requests the last known power exchange (in MW) between two countries

    Arguments:
    zone_key1, zone_key2 -- the two zone identifiers; order does not matter
    session (optional) -- request session passed in order to re-use an existing session
    target_datetime (optional) -- not supported; only live data can be fetched
    logger (optional) -- unused

    Return:
    A dictionary in the form:
    {
      'sortedZoneKeys': 'DK->NO',
      'datetime': '2017-01-01T00:00:00Z',
      'netFlow': 0.0,
      'source': 'mysource.com'
    }
    """
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')
    sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
    mapping = EXCHANGE_MAPPING_DICTIONARY[sorted_zone_keys]
    r = session or requests.session()
    url = 'https://www.aemo.com.au/aemo/apps/api/report/ELEC_NEM_SUMMARY'
    response = r.get(url)
    obj = [o for o in response.json()['ELEC_NEM_SUMMARY']
           if o['REGIONID'] == mapping['region_id']][0]
    flows = json.loads(obj['INTERCONNECTORFLOWS'])
    net_flow = 0
    import_capacity = 0
    export_capacity = 0
    # Walk the interconnectors in lockstep with their sign conventions
    # (zip() replaces the previous index-based parallel-list loop).
    for name, direction in zip(mapping['interconnector_names'],
                               mapping['directions']):
        interconnector = [f for f in flows if f['name'] == name][0]
        net_flow += direction * interconnector['value']
        # When the reported direction is reversed (direction == -1), the
        # import and export limits swap roles.
        import_capacity += direction * interconnector[
            'importlimit' if direction == 1 else 'exportlimit']
        export_capacity += direction * interconnector[
            'exportlimit' if direction == 1 else 'importlimit']
    data = {
        'sortedZoneKeys': sorted_zone_keys,
        'netFlow': net_flow,
        'capacity': [import_capacity, export_capacity],  # first one should be negative
        'source': 'aemo.com.au',
        # SETTLEMENTDATE marks the end of a 5-minute interval; shift it back
        # to the interval start. (NOTE(review): arrow >= 0.11 renamed the
        # plural-argument replace() to shift() -- confirm the pinned version.)
        'datetime': arrow.get(arrow.get(obj['SETTLEMENTDATE']).datetime, 'Australia/NSW').replace(
            minutes=-5).datetime
    }
    return data
# Maps electricitymap zone keys to AEMO NEM region identifiers.
PRICE_MAPPING_DICTIONARY = {
    'AUS-NSW': 'NSW1',
    'AUS-QLD': 'QLD1',
    'AUS-SA': 'SA1',
    'AUS-TAS': 'TAS1',
    'AUS-VIC': 'VIC1',
}
def fetch_price(zone_key=None, session=None, target_datetime=None, logger=None):
    """Requests the last known power price of a given country

    Arguments:
    zone_key (optional) -- used in case a parser is able to fetch multiple countries
    session (optional) -- request session passed in order to re-use an existing session

    Return:
    A dictionary in the form:
    {
      'zoneKey': 'FR',
      'currency': 'EUR',
      'datetime': '2017-01-01T00:00:00Z',
      'price': 0.0,
      'source': 'mysource.com'
    }
    """
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')
    req = session or requests.session()
    url = 'https://www.aemo.com.au/aemo/apps/api/report/ELEC_NEM_SUMMARY'
    summary = req.get(url).json()['ELEC_NEM_SUMMARY']
    region_id = PRICE_MAPPING_DICTIONARY[zone_key]
    obj = [entry for entry in summary if entry['REGIONID'] == region_id][0]
    # SETTLEMENTDATE marks the end of a 5-minute interval; report the start.
    settlement = arrow.get(arrow.get(obj['SETTLEMENTDATE']).datetime, 'Australia/NSW')
    return {
        'zoneKey': zone_key,
        'currency': 'AUD',
        'price': obj['PRICE'],
        'source': 'aemo.com.au',
        'datetime': settlement.replace(minutes=-5).datetime
    }
if __name__ == '__main__':
    # Main method, never used by the Electricity Map backend, but handy for testing.
    for zone in ('AUS-NSW', 'AUS-QLD', 'AUS-SA', 'AUS-TAS', 'AUS-VIC'):
        print('fetch_production("%s") ->' % zone)
        print(fetch_production(zone))
    # print("fetch_exchange('AUS-NSW', 'AUS-QLD') ->")
    # print(fetch_exchange('AUS-NSW', 'AUS-QLD'))
    # print("fetch_exchange('AUS-NSW', 'AUS-VIC') ->")
    # print(fetch_exchange('AUS-NSW', 'AUS-VIC'))
    # print("fetch_exchange('AUS-VIC', 'AUS-SA') ->")
    # print(fetch_exchange('AUS-VIC', 'AUS-SA'))
    # print("fetch_exchange('AUS-VIC', 'AUS-TAS') ->")
    # print(fetch_exchange('AUS-VIC', 'AUS-TAS'))
| gpl-3.0 |
nkhuyu/neon | setup.py | 5 | 6352 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
from setuptools import setup, find_packages, Command
import subprocess
# Define version information
VERSION = '0.9.0'
FULLVERSION = VERSION
write_version = True
# Append the short git SHA as a local version label ("0.9.0+<sha>") when the
# source tree is a git checkout; otherwise keep the plain version.
try:
    pipe = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"],
                            stdout=subprocess.PIPE)
    (so, serr) = pipe.communicate()
    if pipe.returncode == 0:
        FULLVERSION += "+%s" % so.strip().decode("utf-8")
except Exception:
    # BUGFIX: a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    # only ordinary failures (e.g. git not installed) should be ignored.
    pass
if write_version:
    # Assemble the Apache-2.0 license header plus a docstring/version
    # template, then render it into the generated neon/version.py module.
    txt = "# " + ("-" * 77) + "\n"
    txt += "# Copyright 2014 Nervana Systems Inc.\n"
    txt += "# Licensed under the Apache License, Version 2.0 "
    txt += "(the \"License\");\n"
    txt += "# you may not use this file except in compliance with the "
    txt += "License.\n"
    txt += "# You may obtain a copy of the License at\n"
    txt += "#\n"
    txt += "# http://www.apache.org/licenses/LICENSE-2.0\n"
    txt += "#\n"
    txt += "# Unless required by applicable law or agreed to in writing, "
    txt += "software\n"
    txt += "# distributed under the License is distributed on an \"AS IS\" "
    txt += "BASIS,\n"
    txt += "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or "
    txt += "implied.\n"
    txt += "# See the License for the specific language governing permissions "
    txt += "and\n"
    txt += "# limitations under the License.\n"
    txt += "# " + ("-" * 77) + "\n"
    txt += "\"\"\"\n%s\n\"\"\"\nVERSION = '%s'\nSHORT_VERSION = '%s'\n"
    fname = os.path.join(os.path.dirname(__file__), 'neon', 'version.py')
    # Context manager replaces the manual open/try/finally pattern and
    # guarantees the handle is closed even if the write raises.
    with open(fname, 'w') as version_file:
        version_file.write(txt % ("Project version information.", FULLVERSION, VERSION))
# Define dependencies
# Base runtime requirements; backend/dev extras are appended by NeonCommand.
dependency_links = []
required_packages = ['numpy>=1.8.1', 'PyYAML>=3.11']
class NeonCommand(Command):
    """Custom setuptools command that injects backend-specific dependencies
    into the distribution before the standard commands run."""

    description = "Passes additional build type options to subsequent commands"
    user_options = [('cpu=', None, 'Add CPU backend related dependencies'),
                    ('gpu=', None, 'Add GPU backend related dependencies'),
                    ('dev=', None, 'Add development related dependencies')]

    def initialize_options(self):
        # Every backend flag defaults to the string "0" (disabled).
        self.cpu = "0"
        self.gpu = "0"
        self.dev = "0"

    def run(self):
        # Extend the shared distribution lists in place according to flags.
        requires = self.distribution.install_requires
        links = self.distribution.dependency_links
        if self.dev == "1":
            requires += ['nose>=1.3.0',
                         'flake8>=2.2.2',
                         'pep8-naming>=0.2.2',
                         'Pillow>=2.5.0',
                         'sphinx>=1.2.2',
                         'posix_ipc>=1.0.0',
                         'sphinxcontrib-napoleon>=0.2.8',
                         'scikit-learn>=0.15.2',
                         'matplotlib>=1.4.0',
                         'imgworker>=0.2.5']
            links += ['git+https://github.com/NervanaSystems/imgworker.git'
                      '#egg=imgworker']
        if self.gpu in ("1", "cudanet"):
            requires += ['cudanet>=0.2.7',
                         'pycuda>=2015.1']
            links += ['git+https://github.com/NervanaSystems/cuda-convnet2.git'
                      '#egg=cudanet']
        if self.gpu == "nervanagpu":
            requires += ['nervanagpu>=0.3.3']
            links += ['git+https://github.com/NervanaSystems/nervanagpu.git'
                      '#egg=nervanagpu']

    def finalize_options(self):
        pass
# Package metadata; the custom ``neon`` command (above) injects extra
# backend/dev dependencies before the standard commands execute.
setup(name='neon',
      version=VERSION,
      description='Deep learning framework with configurable backends',
      long_description=open('README.md').read(),
      author='Nervana Systems',
      author_email='info@nervanasys.com',
      url='http://www.nervanasys.com',
      license='License :: OSI Approved :: Apache Software License',
      scripts=['bin/neon', 'bin/hyperopt'],
      packages=find_packages(),
      install_requires=required_packages,
      cmdclass={'neon': NeonCommand},
      classifiers=['Development Status :: 3 - Alpha',
                   'Environment :: Console',
                   'Environment :: Console :: Curses',
                   'Environment :: Web Environment',
                   'Intended Audience :: End Users/Desktop',
                   'Intended Audience :: Developers',
                   'Intended Audience :: Science/Research',
                   'License :: OSI Approved :: Apache Software License',
                   'Operating System :: POSIX',
                   'Operating System :: MacOS :: MacOS X',
                   'Programming Language :: Python',
                   'Topic :: Scientific/Engineering :: ' +
                   'Artificial Intelligence',
                   'Topic :: Scientific/Engineering :: Information Analysis',
                   'Topic :: System :: Distributed Computing'])
| apache-2.0 |
CharlesGulian/Deconv | write_pixel_offset.py | 1 | 2549 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 10:37:01 2016
@author: charlesgulian
"""
# Add (x,y) pixel offset to .FITS header of an image
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
class Image:
    """Lightweight record tying a FITS file path to a category and an ID."""

    def __init__(self, filename, category, ID):
        self.filename, self.category, self.ID = filename, category, ID
        # Placeholder for a mask; stays None until one is attached.
        self.masked = None
#image_file = 'AstroImages/Bad/fpC-5781-x25627-y293_stitched_alignCropped.fits'
#new_image_file = ('AstroImages/TEST_IMAGE.fits')
def write_pixel_offset(x_offset,y_offset,image_file,new_image_file=None):
    """Write an (x, y) pixel offset into a FITS image header.

    The offset is stored in the ``x_offset``/``y_offset`` header keywords.
    When *new_image_file* is None the input file is overwritten in place.
    """
    header = fits.getheader(image_file)
    header['x_offset'] = x_offset
    header['y_offset'] = y_offset
    # BUGFIX (idiom): identity test ``is None`` replaces ``== None``, which
    # can be hijacked by a custom __eq__.
    if new_image_file is None:
        new_image_file = image_file
    # clobber=True allows overwriting an existing file (renamed to
    # ``overwrite`` in astropy >= 1.3 -- TODO confirm the pinned version).
    fits.writeto(new_image_file,fits.getdata(image_file),header,clobber=True)
#write_pixel_offset(1,2,image_file,new_image_file=new_image_file)
# Catalog of deconvolved test images (category 'Deconvolved'); the IDs mirror
# the deconvolution variant encoded in each file name.
deconvolvedImage2 = Image('AstroImages/Deconvolved/normal_deconv.fits','Deconvolved','Normal')
deconvolvedImage3 = Image('AstroImages/Deconvolved/transposed_deconv.fits','Deconvolved','Transposed')
deconvolvedImage4 = Image('AstroImages/Deconvolved/transposed_dir_deconv.fits','Deconvolved','Transposed_Dir')
deconvolvedImage5 = Image('AstroImages/Deconvolved/twice_transposed_deconv.fits','Deconvolved','Twice_Transposed')
deconvolvedImage6 = Image('AstroImages/Deconvolved/shuffled_deconv.fits','Deconvolved','Shuffled')
deconvolvedImage7 = Image('AstroImages/Deconvolved/shuffled_seed1_deconv.fits','Deconvolved','Shuffled_Seed1')
deconvolvedImage8 = Image('AstroImages/Deconvolved/initialized_complete_deconv.fits','Deconvolved','Initialized_Complete')
deconvolvedImage9 = Image('AstroImages/Deconvolved/initialized_rm0-9_deconv.fits','Deconvolved','Initialized_rm0-9')
deconvolvedImage10 = Image('AstroImages/Deconvolved/initialized_rm10-19_deconv.fits','Deconvolved','Initialized_rm10-19')
deconvolvedImage11 = Image('AstroImages/Deconvolved/initialized_rm20-29_deconv.fits','Deconvolved','Initialized_rm20-29')
deconvolvedImage12 = Image('AstroImages/Deconvolved/og_initialized_complete_deconv.fits','Deconvolved','OG_Initialized_Complete')
deconvolvedImage13 = Image('AstroImages/Deconvolved/og_initialized_smoothed_complete_deconv.fits','Deconvolved','OG_Initialized_Smoothed_Complete')
#write_pixel_offset(0,0,deconvolvedImage2.filename)
#write_pixel_offset(0,0,deconvolvedImage3.filename)
write_pixel_offset(0,0,deconvolvedImage13.filename) | gpl-3.0 |
RPGOne/Skynet | test_grid_search.py | 3 | 1451 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score, mean_squared_error
import tensorflow as tf
import skflow
class GridSearchTest(tf.test.TestCase):
    """Checks that skflow estimators plug into sklearn's GridSearchCV."""

    def testIrisDNN(self):
        random.seed(42)  # fixed seed keeps the run deterministic
        iris = datasets.load_iris()
        base_estimator = skflow.TensorFlowDNNClassifier(
            hidden_units=[10, 20, 10], n_classes=3, steps=50)
        param_grid = {'hidden_units': [[5, 5], [10, 10]],
                      'learning_rate': [0.1, 0.01]}
        search = GridSearchCV(base_estimator, param_grid)
        search.fit(iris.data, iris.target)
        score = accuracy_score(iris.target, search.predict(iris.data))
        self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| bsd-3-clause |
wazeerzulfikar/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 52 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
# #############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
# #############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
dmordom/nipype | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a parsed NumPy-format docstring as Sphinx reStructuredText.

    Parsing is inherited from ``NumpyDocString``; each ``_str_*`` helper
    below returns a list of reST lines, and ``__str__`` concatenates them
    in a fixed section order.
    """
    def __init__(self, docstring, config={}):
        # NOTE(review): mutable default ``config={}`` is shared across calls;
        # harmless here because it is only read via .get().
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)
    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # ``symbol`` is ignored: section headers are emitted as rubrics.
        return ['.. rubric:: ' + name, '']
    def _str_field_list(self, name):
        return [':' + name + ':']
    def _str_indent(self, doc, indent=4):
        # Prefix every line of ``doc`` with ``indent`` spaces.
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out
    def _str_signature(self):
        # NOTE(review): the unconditional return makes everything below
        # unreachable, i.e. signatures are always suppressed in the Sphinx
        # output. This looks intentional (autodoc renders the signature
        # itself) -- confirm before removing the dead code.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
    def _str_summary(self):
        return self['Summary'] + ['']
    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']
    def _str_param_list(self, name):
        # Render a parameter-style section (Parameters/Returns/Raises/...)
        # as a reST field list with indented descriptions.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param,param_type,desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc,8)
                out += ['']
        return out
    @property
    def _obj(self):
        # The documented object: a class wrapper sets ``_cls``, a
        # function/plain-object wrapper sets ``_f`` (see SphinxObjDoc).
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that actually exist on the object go into the
                # autosummary table; the rest fall back to a plain table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += [" %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            if autosum:
                out += ['.. autosummary::', ' :toctree:', '']
                out += autosum
            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out
    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out
    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Drop the parent's two header lines; keep only the body.
            out += self._str_indent(see_also[2:])
        return out
    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out
    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default','')]
        # NOTE(review): dict.iteritems() is Python 2 only.
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out
    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic version compare ("0.6") misorders
            # two-digit components such as "0.10" -- verify against the
            # Sphinx versions this must support.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex','']
            else:
                out += ['.. latexonly::','']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        # Wrap matplotlib examples in a plot:: directive when enabled and
        # the author has not already added one.
        if (self.use_plots and 'import matplotlib' in examples_str
            and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')
    def __str__(self, indent=0, func_role="obj"):
        # Assemble all sections in canonical order, then indent the whole
        # result (used when nesting inside a class listing).
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out,indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    # Function variant: parses with FunctionDoc, renders with SphinxDocString.
    def __init__(self, obj, doc=None, config={}):
        # NOTE(review): mutable default ``config={}`` is shared across calls;
        # harmless here because it is only read via .get().
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    # Class variant: parses with ClassDoc, renders with SphinxDocString.
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the ``func_doc`` argument is accepted but not
        # forwarded -- ClassDoc always receives func_doc=None here, so the
        # SphinxFunctionDoc passed in by get_doc_object is ignored. Confirm
        # this matches upstream numpydoc before changing it.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    # Fallback wrapper for modules and plain objects: stores the object in
    # ``_f`` (read back via the ``_obj`` property) and renders ``doc``.
    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx docstring wrapper for ``obj``.

    ``what`` ('class' / 'module' / 'function' / 'method' / 'object') is
    inferred from ``obj`` when not supplied by the caller.
    """
    if what is None:
        # Infer the object's category when the caller did not provide one.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # Modules and plain objects fall back to the generic wrapper.
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
twbattaglia/koeken | setup.py | 1 | 1924 | import setuptools
from setuptools.command.install import install
# Install Necessary R packages
class CustomInstallPackages(install):
"""Customized setuptools install command - runs R package install one-liner."""
def run(self):
import subprocess
import shlex
print "Attempting to install R packages...Please wait."
cmd =''' R -e "install.packages(c('optparse', 'gtools', 'klaR','survival', 'mvtnorm', 'modeltools', 'coin', 'MASS'), repos = 'http://cran.stat.ucla.edu')" '''
try:
subprocess.call(shlex.split(cmd))
print "Necessary R packages were sucessfully installed"
except:
print "Error installing R dependecies! Check to see if R is properly installed or see online documentation for more answers."
install.run(self)
# Pkg info
setuptools.setup(
name="koeken",
version="0.2.6",
url="https://github.com/twbattaglia/koeken",
author="Thomas W. Battaglia",
author_email="tb1280@nyu.edu",
description="A Linear Discriminant Analysis (LEfSe) wrapper.",
long_description=open('README.rst').read(),
keywords="Biology Microbiome LEfSe QIIME Formatting Diversity Python Bioinformatics",
scripts=['koeken/koeken.py', 'koeken/lefse_src/format_input.py', 'koeken/lefse_src/run_lefse.py', 'koeken/lefse_src/lefse.py', 'koeken/lefse_src/plot_cladogram.py', 'koeken/lefse_src/hclust2/hclust2.py', 'koeken/pretty_lefse.py'],
cmdclass={'install': CustomInstallPackages},
packages=setuptools.find_packages(),
install_requires=['rpy2', 'argparse', 'pandas', 'biopython'],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4'
]
)
| mit |
andybrnr/QuantEcon.py | examples/evans_sargent_plot1.py | 7 | 1086 | """
Plot 1 from the Evans Sargent model.
@author: David Evans
Edited by: John Stachurski
"""
import numpy as np
import matplotlib.pyplot as plt
from evans_sargent import T, y
tt = np.arange(T) # tt is used to make the plot time index correct.
n_rows = 3
fig, axes = plt.subplots(n_rows, 1, figsize=(10, 12))
plt.subplots_adjust(hspace=0.5)
for ax in axes:
ax.grid()
ax.set_xlim(0, 15)
bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'}
p_args = {'lw': 2, 'alpha': 0.7}
ax = axes[0]
ax.plot(tt, y[1, :], 'b-', label="output", **p_args)
ax.set_ylabel(r"$Q$", fontsize=16)
ax.legend(ncol=1, **legend_args)
ax = axes[1]
ax.plot(tt, y[2, :], 'b-', label="tax rate", **p_args)
ax.set_ylabel(r"$\tau$", fontsize=16)
ax.set_yticks((0.0, 0.2, 0.4, 0.6, 0.8))
ax.legend(ncol=1, **legend_args)
ax = axes[2]
ax.plot(tt, y[3, :], 'b-', label="first difference in output", **p_args)
ax.set_ylabel(r"$u$", fontsize=16)
ax.set_yticks((0, 100, 200, 300, 400))
ax.legend(ncol=1, **legend_args)
ax.set_xlabel(r'time', fontsize=16)
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
antoinecarme/pyaf | tests/missing_data/test_missing_data_air_passengers_generic.py | 1 | 1810 |
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
def add_some_missing_data_in_signal(df, col):
    """Blank out a fixed, reproducible 20% of column *col* in *df* (in place).

    Rows are chosen with a fixed random seed so every run corrupts the
    same entries; the (mutated) frame is returned for convenience.
    """
    fraction = 0.2
    chosen_rows = df.sample(frac=fraction, random_state=1960).index
    df.loc[chosen_rows, col] = np.nan
    return df
def add_some_missing_data_in_time(df, col, rate=0.2):
    """Blank out a reproducible fraction of the time column *col* in *df*.

    Parameters
    ----------
    df : pandas.DataFrame, mutated in place (and also returned).
    col : name of the column to corrupt.
    rate : fraction of rows whose *col* value is set to NaN (default 0.2,
        matching the previous hard-coded value). Rows are picked with a
        fixed seed so runs are repeatable.
    """
    df.loc[df.sample(frac=rate, random_state=1960).index, col] = np.nan
    return df
def test_air_passengers_missing_data(iTimeMissingDataImputation, iSignalMissingDataImputation):
    """Run the airline-passengers benchmark end to end with the requested
    missing-data imputation methods; a ``None`` setting skips both the
    corruption and the imputation for that column."""
    bench = tsds.load_airline_passengers()
    frame = bench.mPastData
    if iTimeMissingDataImputation is not None:
        frame = add_some_missing_data_in_time(frame, bench.mTimeVar)
    if iSignalMissingDataImputation is not None:
        frame = add_some_missing_data_in_signal(frame, bench.mSignalVar)
    engine = autof.cForecastEngine()
    horizon = bench.mHorizon
    engine.mOptions.mMissingDataOptions.mTimeMissingDataImputation = iTimeMissingDataImputation
    engine.mOptions.mMissingDataOptions.mSignalMissingDataImputation = iSignalMissingDataImputation
    engine.train(frame, bench.mTimeVar, bench.mSignalVar, horizon)
    engine.getModelInfo()
    print(engine.mSignalDecomposition.mTrPerfDetails.head())
    apply_in = frame.copy()
    apply_in.tail()
    apply_out = engine.forecast(apply_in, horizon)
    # apply_out.to_csv("outputs/ozone_apply_out.csv")
    apply_out.tail(2 * horizon)
    print("Forecast Columns " , apply_out.columns)
    forecast_frame = apply_out[[bench.mTimeVar, bench.mSignalVar, bench.mSignalVar + '_Forecast']]
    print(forecast_frame.info())
    print("Forecasts\n" , forecast_frame.tail(horizon))
    print("\n\n<ModelInfo>")
    print(engine.to_json())
    print("</ModelInfo>\n\n")
    print("\n\n<Forecast>")
    print(forecast_frame.tail(2 * horizon).to_json(date_format='iso'))
    print("</Forecast>\n\n")
| bsd-3-clause |
DBernardes/ProjetoECC | Ganho/Codigo/run.py | 1 | 2380 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Criado em 08 de Novembro de 2016
Descricao: este codigo reune todas as bibliotecas responsaveis para a caracterizacao do ganho do CCD, sao elas:
plotGraph, logfile, makeList_imagesInput e Gain_processesImages. O codigo ira criar duas listas de imagens: flat e bias,
realizando o calculo da instensidade do sinal em funcao da variancia. Esses dados serao usado na plotagem de um grafico linear
e, por meio do coeficiente angular de um ajuste linear calculado, obtem-se o ganho; um segundo grafico e plotado onde
aparece o resultado da subtracao dos dados obtidos pelos valores de um ajuste linear calculado.
@author: Denis Varise Bernardes & Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil.
example: ./ganhoCompleto.py -f'Flat','nImages' -b'Bias'
Esta lista fornecida ao programa deve conter as imagens de bias e as imagens flat associdas em conjunto na forma
biasA,biasB,flatA,flatB.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import os, sys
import matplotlib.pyplot as plt
import datetime
from optparse import OptionParser
from plotGraph import Graph_sinal_variance, Graph_residuos
from logfile import logfile
from makeList_imagesInput import criaArq_listaImgInput, LeArquivoReturnLista
from Gain_processesImages import calcXY_YerrorBar_XerrorBar, parametrosCaixaPixels
from criaArq_resultadoCaract import arquivoCaract
from astropy.io import fits
numeroImagens = 5
Flat_name = 'Flat'
Bias_name = 'Bias'
images_path = r'C:\Users\observer\Desktop\Imagens_ECC\Gain'
criaArq_listaImgInput(numeroImagens, Flat_name, images_path)
criaArq_listaImgInput(1, Bias_name, images_path)
listaBias = LeArquivoReturnLista(Bias_name+'list.txt', images_path)
listaFlat = LeArquivoReturnLista(Flat_name+'list.txt', images_path)
#----------------------------------------------------------------------------------------------------------------------
caixa_pixels = '512,512,100'
parametersBox = parametrosCaixaPixels(caixa_pixels, listaFlat[0])
X,Y,SigmaTotal, XsigmaBar, sigmaBias = calcXY_YerrorBar_XerrorBar(listaFlat, listaBias, numeroImagens, parametersBox, images_path)
plt.figure(figsize=(17,8))
ganho = Graph_sinal_variance(X,Y,SigmaTotal, XsigmaBar, sigmaBias)
Graph_residuos(X,Y, SigmaTotal, images_path)
| mit |
mjasher/gac | original_libraries/flopy-master/examples/scripts/flopy_swi2_ex3.py | 1 | 6979 | import os
import sys
import math
import numpy as np
import flopy.modflow as mf
import flopy.utils as fu
import matplotlib.pyplot as plt
# --modify default matplotlib settings
updates = {'font.family':['Univers 57 Condensed', 'Arial'],
'mathtext.default':'regular',
'pdf.compression':0,
'pdf.fonttype':42,
'legend.fontsize':7,
'axes.labelsize':8,
'xtick.labelsize':7,
'ytick.labelsize':7}
plt.rcParams.update(updates)
def MergeData(ndim, zdata, tb):
    """Collapse per-layer zeta surfaces into one profile of length ``ndim``.

    For each column, take the value from the first layer whose entry lies
    strictly inside that layer's top/bottom window ``tb[idx]`` (expanded by
    a 0.05 tolerance); columns matched by no layer stay NaN.

    Fixes: ``np.float``/``np.bool`` aliases were removed in NumPy >= 1.24
    (AttributeError on modern installs); the loop-invariant window bounds
    are now hoisted out of the inner loop and the ``== True`` comparison
    replaced with plain truthiness.
    """
    sv = 0.05  # tolerance added around each layer's top/bottom
    md = np.full(ndim, np.nan, dtype=float)
    found = np.zeros(ndim, dtype=bool)
    for idx, layer in enumerate(zdata):
        t0 = tb[idx][0] - sv
        t1 = tb[idx][1] + sv
        for jdx, z in enumerate(layer):
            if found[jdx]:
                continue
            if t1 < z < t0:
                md[jdx] = z
                found[jdx] = True
    return md
def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
    """Draw a hand-rolled stacked legend on *ax*: one short colored line per
    entry in *cc*, labelled '<t> years', stepping (dy, dt) per row."""
    for i, color in enumerate(cc):
        y = y0 + i * dy
        t = t0 + i * dt
        ax.plot([x0, x0 + dx], [y, y], color=color, linewidth=4)
        ax.text(x0 + 2. * dx, y + dy / 2., '{0:=3d} years'.format(t), size=5)
    return
cleanFiles = False
fext = 'png'
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == '-clean':
cleanFiles = True
elif basearg == '-pdf':
fext = 'pdf'
if cleanFiles:
print 'cleaning all files'
print 'excluding *.py files'
files = os.listdir('.')
for f in files:
if '.py' != os.path.splitext(f)[1].lower():
print ' removing...{}'.format(os.path.basename(f))
os.remove(f)
sys.exit(1)
modelname = 'swiex3'
exe_name = 'mf2005'
nlay = 3
nrow = 1
ncol = 200
delr = 20.0
delc = 1.
#--well data
lrcQ1 = np.array([(0, 0, 199, 0.01), (2, 0, 199, 0.02)])
lrcQ2 = np.array([(0, 0, 199, 0.01 * 0.5), (2, 0, 199, 0.02 * 0.5)])
#--ghb data
lrchc = np.zeros((30, 5))
lrchc[:, [0, 1, 3, 4]] = [0, 0, 0., 0.8 / 2.0]
lrchc[:, 2] = np.arange(0, 30)
#--swi2 data
zini = np.hstack(( -9 * np.ones(24), np.arange(-9, -50, -0.5), -50 * np.ones(94)))[np.newaxis, :]
iso = np.zeros((1, 200), dtype=np.int)
iso[:, :30] = -2
#--model objects
ml = mf.Modflow(modelname, version='mf2005', exe_name=exe_name)
discret = mf.ModflowDis(ml, nrow=nrow, ncol=ncol, nlay=3, delr=delr, delc=delc,
laycbd=[0, 0, 0], top=-9.0, botm=[-29, -30, -50],
nper=2, perlen=[365 * 1000, 1000 * 365], nstp=[500, 500])
bas = mf.ModflowBas(ml, ibound=1, strt=1.0)
bcf = mf.ModflowBcf(ml, laycon=[0, 0, 0], tran=[40.0, 1, 80.0], vcont=[0.005, 0.005])
wel = mf.ModflowWel(ml, stress_period_data={0:lrcQ1, 1:lrcQ2})
ghb = mf.ModflowGhb(ml, stress_period_data={0:lrchc})
swi = mf.ModflowSwi2(ml, nsrf=1, istrat=1, toeslope=0.01, tipslope=0.04, nu=[0, 0.025],
zeta=[zini, zini, zini], ssz=0.2, isource=iso, nsolver=1)
oc = mf.ModflowOc88(ml, save_head_every=100)
pcg = mf.ModflowPcg(ml)
#--write the model files
ml.write_input()
#--run the model
m = ml.run_model(silent=True)
headfile = '{}.hds'.format(modelname)
hdobj = fu.HeadFile(headfile)
head = hdobj.get_data(totim=3.65000E+05)
zetafile = '{}.zta'.format(modelname)
zobj = fu.CellBudgetFile(zetafile)
zkstpkper = zobj.get_kstpkper()
zeta = []
for kk in zkstpkper:
zeta.append(zobj.get_data(kstpkper=kk, text=' ZETASRF 1')[0])
zeta = np.array(zeta)
fwid, fhgt = 7.00, 4.50
flft, frgt, fbot, ftop = 0.125, 0.95, 0.125, 0.925
colormap = plt.cm.spectral #winter
cc = []
icolor = 11
cr = np.linspace(0.0, 0.9, icolor)
for idx in cr:
cc.append(colormap(idx))
lw = 0.5
x = np.arange(-30 * delr + 0.5 * delr, (ncol - 30) * delr, delr)
xedge = np.linspace(-30. * delr, (ncol - 30.) * delr, len(x) + 1)
zedge = [[-9., -29.], [-29., -30.], [-30., -50.]]
fig = plt.figure(figsize=(fwid, fhgt), facecolor='w')
fig.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt, bottom=fbot, top=ftop)
ax = fig.add_subplot(311)
ax.text(-0.075, 1.05, 'A', transform=ax.transAxes, va='center', ha='center', size='8')
#--confining unit
ax.fill([-600, 3400, 3400, -600], [-29, -29, -30, -30], fc=[.8, .8, .8], ec=[.8, .8, .8])
#--
z = np.copy(zini[0, :])
zr = z.copy()
p = (zr < -9.) & (zr > -50.0)
ax.plot(x[p], zr[p], color=cc[0], linewidth=lw, drawstyle='steps-mid')
#--
for i in range(5):
zt = MergeData(ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge)
dr = zt.copy()
ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle='steps-mid')
#--Manufacture a legend bar
LegBar(ax, -200., -33.75, 0, 25, -2.5, 200, cc[0:6])
#--axes
ax.set_ylim(-50, -9)
ax.set_ylabel('Elevation, in meters')
ax.set_xlim(-250., 2500.)
ax = fig.add_subplot(312)
ax.text(-0.075, 1.05, 'B', transform=ax.transAxes, va='center', ha='center', size='8')
#--confining unit
ax.fill([-600, 3400, 3400, -600], [-29, -29, -30, -30], fc=[.8, .8, .8], ec=[.8, .8, .8])
#--
for i in range(4, 10):
zt = MergeData(ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge)
dr = zt.copy()
ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle='steps-mid')
#--Manufacture a legend bar
LegBar(ax, -200., -33.75, 1000, 25, -2.5, 200, cc[5:11])
#--axes
ax.set_ylim(-50, -9)
ax.set_ylabel('Elevation, in meters')
ax.set_xlim(-250., 2500.)
ax = fig.add_subplot(313)
ax.text(-0.075, 1.05, 'C', transform=ax.transAxes, va='center', ha='center', size='8')
#--confining unit
ax.fill([-600, 3400, 3400, -600], [-29, -29, -30, -30], fc=[.8, .8, .8], ec=[.8, .8, .8])
#--
zt = MergeData(ncol, [zeta[4, 0, 0, :], zeta[4, 1, 0, :], zeta[4, 2, 0, :]], zedge)
ax.plot(x, zt, marker='o', markersize=3, linewidth=0.0, markeredgecolor='blue', markerfacecolor='None')
#--ghyben herzberg
zeta1 = -9 - 40. * (head[0, 0, :])
gbh = np.empty(len(zeta1), np.float)
gbho = np.empty(len(zeta1), np.float)
for idx, z1 in enumerate(zeta1):
if z1 >= -9.0 or z1 <= -50.0:
gbh[idx] = np.nan
gbho[idx] = 0.
else:
gbh[idx] = z1
gbho[idx] = z1
ax.plot(x, gbh, 'r')
np.savetxt('Ghyben-Herzberg.out', gbho)
#--fake figures
ax.plot([-100., -100], [-100., -100], 'r', label='Ghyben-Herzberg')
ax.plot([-100., -100], [-100., -100], 'bo', markersize=3, markeredgecolor='blue', markerfacecolor='None', label='SWI2')
#--legend
leg = ax.legend(loc='lower left', numpoints=1)
leg._drawFrame = False
#--axes
ax.set_ylim(-50, -9)
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.set_xlim(-250., 2500.)
outfig = 'Figure08_swi2ex3.{0}'.format(fext)
fig.savefig(outfig, dpi=300)
print 'created...', outfig
| gpl-2.0 |
nikitasingh981/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 53 | 13398 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """Smoke-test parsing of the classification fixture: sparse shape,
    stored and implicit-zero entries, in-place mutability, and targets."""
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading from a raw OS file descriptor must match loading by path."""
    expected_X, expected_y = load_svmlight_file(datafile)
    descriptor = os.open(datafile, os.O_RDONLY)
    try:
        actual_X, actual_y = load_svmlight_file(descriptor)
        assert_array_equal(expected_X.data, actual_X.data)
        assert_array_equal(expected_y, actual_y)
    finally:
        os.close(descriptor)
def test_load_svmlight_file_multilabel():
    """Multilabel targets are parsed into one tuple of labels per sample."""
    _, labels = load_svmlight_file(multifile, multilabel=True)
    assert_equal(labels, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
    """Loading the same file repeatedly yields identical data in the
    requested dtype, for both float32 and float64."""
    train_X, train_y, test_X, test_y = load_svmlight_files(
        [datafile] * 2, dtype=np.float32)
    assert_array_equal(train_X.toarray(), test_X.toarray())
    assert_array_equal(train_y, test_y)
    assert_equal(train_X.dtype, np.float32)
    assert_equal(test_X.dtype, np.float32)
    X1, y1, X2, y2, X3, y3 = load_svmlight_files(
        [datafile] * 3, dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    """An explicit n_features wider than the file pads X; requesting fewer
    columns than the file contains must raise ValueError."""
    X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)
    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Gzip- and bzip2-compressed files must load like the plain file.

    Fix: the compressed writer objects returned by ``gzip.open`` /
    ``BZ2File`` were never closed, so the compressed stream was only
    flushed to disk by CPython's refcounting — an implementation
    accident. They are now closed deterministically via ``with``.
    The duplicated gz/bz2 code is factored into one helper.
    """
    X, y = load_svmlight_file(datafile)

    def check_roundtrip(suffix, opener):
        # Compress datafile into a temporary file, load it back and
        # compare with the uncompressed result.
        with NamedTemporaryFile(prefix="sklearn-test", suffix=suffix) as tmp:
            tmp.close()  # necessary under windows
            with open(datafile, "rb") as fin, opener(tmp.name, "wb") as fout:
                # Closing fout guarantees the compressed stream is
                # fully flushed before it is read back.
                shutil.copyfileobj(fin, fout)
            Xc, yc = load_svmlight_file(tmp.name)
            # because we "close" it manually and write to it,
            # we need to remove it manually.
            os.remove(tmp.name)
        assert_array_equal(X.toarray(), Xc.toarray())
        assert_array_equal(y, yc)

    check_roundtrip(".gz", gzip.open)
    check_roundtrip(".bz2", BZ2File)
@raises(ValueError)
def test_load_invalid_file():
    # A malformed svmlight file must raise ValueError.
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    # Feature indices that are not in ascending order must raise.
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    # zero_based=False implies one-based indexing, so a feature id of 0
    # in the data is invalid and must raise.
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    """zero_based="auto": per-file heuristic vs. consistency across files."""
    one_based = b("-1 1:1 2:2 3:3\n")
    zero_based = b("-1 0:0 1:1\n")

    # Alone, the one-based file is detected as such: 3 feature columns.
    X, y = load_svmlight_file(BytesIO(one_based), zero_based="auto")
    assert_equal(X.shape, (1, 3))

    # Together with a file that uses index 0 both must be read as
    # zero-based, giving 4 feature columns each.
    X1, y1, X2, y2 = load_svmlight_files(
        [BytesIO(one_based), BytesIO(zero_based)], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    """qid attributes: silently dropped or returned depending on query_id."""
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")

    # Without query_id the qid column is ignored.
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])

    # With query_id=True both entry points return the qid array too.
    results = (load_svmlight_files([BytesIO(data)], query_id=True),
               load_svmlight_file(BytesIO(data), query_id=True))
    for X, y, qid in results:
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    # One invalid file among several must make the whole call fail.
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors), so a float is used to trigger the TypeError.
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    # A path that does not exist must raise IOError.
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """Round-trip dump/load over sparse/dense/sliced X and y, both
    index conventions and several dtypes; also checks the version and
    zero/one-based comment lines that dump_svmlight_file writes."""
    X_sparse, y_dense = load_svmlight_file(datafile)
    X_dense = X_sparse.toarray()
    y_sparse = sp.csr_matrix(y_dense)

    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
    y_sliced = y_sparse[np.arange(y_sparse.shape[0])]

    for X in (X_sparse, X_dense, X_sliced):
        for y in (y_sparse, y_dense, y_sliced):
            for zero_based in (True, False):
                for dtype in [np.float32, np.float64, np.int32]:
                    f = BytesIO()
                    # we need to pass a comment to get the version info in;
                    # LibSVM doesn't grok comments so they're not put in by
                    # default anymore.
                    if (sp.issparse(y) and y.shape[0] == 1):
                        # make sure y's shape is: (n_samples, n_labels)
                        # when it is sparse
                        y = y.T

                    dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                       zero_based=zero_based)
                    f.seek(0)

                    # First comment line carries the scikit-learn version.
                    comment = f.readline()
                    try:
                        comment = str(comment, "utf-8")
                    except TypeError:  # fails in Python 2.x
                        pass
                    assert_in("scikit-learn %s" % sklearn.__version__, comment)

                    # Second comment line states the index convention.
                    comment = f.readline()
                    try:
                        comment = str(comment, "utf-8")
                    except TypeError:  # fails in Python 2.x
                        pass
                    assert_in(["one", "zero"][zero_based] + "-based", comment)

                    X2, y2 = load_svmlight_file(f, dtype=dtype,
                                                zero_based=zero_based)
                    assert_equal(X2.dtype, dtype)
                    # Loaded indices must already be sorted.
                    assert_array_equal(X2.sorted_indices().indices, X2.indices)

                    X2_dense = X2.toarray()
                    if dtype == np.float32:
                        # allow a rounding error at the last decimal place
                        assert_array_almost_equal(
                            X_dense.astype(dtype), X2_dense, 4)
                        assert_array_almost_equal(
                            y_dense.astype(dtype), y2, 4)
                    else:
                        # allow a rounding error at the last decimal place
                        assert_array_almost_equal(
                            X_dense.astype(dtype), X2_dense, 15)
                        assert_array_almost_equal(
                            y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
    """Multilabel targets are written as comma-separated label lists."""
    X = [[1, 0, 3, 0, 5],
         [0, 0, 0, 0, 0],
         [0, 5, 0, 1, 0]]
    y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    y_sparse = sp.csr_matrix(y_dense)

    expected_lines = (b("1 0:1 2:3 4:5\n"),
                      b("0,2 \n"),
                      b("0,1 1:5 3:1\n"))
    for y in [y_dense, y_sparse]:
        f = BytesIO()
        dump_svmlight_file(X, y, f, multilabel=True)
        f.seek(0)
        # make sure it dumps multilabel correctly
        for expected in expected_lines:
            assert_equal(f.readline(), expected)
def test_dump_concise():
    """Dumped values must use the shortest exact decimal representation."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    """Comments round-trip; bytes comments must be ASCII, unicode is ok,
    and NUL bytes are rejected."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()

    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    # Raw UTF-8 bytes are not valid ASCII -> must raise.
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)

    # The same text passed as a unicode string is accepted.
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    # NUL bytes inside a comment are rejected.
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """dump_svmlight_file rejects 2-d and wrong-length targets."""
    X, y = load_svmlight_file(datafile)

    # A two-dimensional y is not a valid (single-label) target.
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, [y], f)

    # y must have exactly one entry per sample.
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    # test dumping a file with query_id: the qid column must survive a
    # dump/load round-trip unchanged.
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    # Two consecutive samples share one query id.
    query_id = np.arange(X.shape[0]) // 2

    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)

    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
    # load svmfile with longint qid attribute; 64-bit qid values must
    # round-trip through dump/load without overflow.
    data = b("""
    1 qid:0 0:1 1:2 2:3
    0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
    0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
    3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
    X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)

    true_X = [[1, 2, 3],
              [1440446648, 72048431380967004, 236784985],
              [1440446648, 72048431380967004, 236784985],
              [1440446648, 72048431380967004, 236784985]]
    true_y = [1, 0, 0, 3]
    # qids span the full signed 64-bit range.
    trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
    assert_array_equal(qid, trueQID)

    # Dump with the same qids and reload: everything must be preserved.
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
    f.seek(0)
    X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
    assert_array_equal(qid, trueQID)

    # Reloading with query_id=False simply drops the qid column.
    f.seek(0)
    X, y = load_svmlight_file(f, query_id=False, zero_based=True)
    assert_array_equal(y, true_y)
    assert_array_equal(X.toarray(), true_X)
supriyantomaftuh/innstereo | innstereo/main_ui.py | 1 | 116913 | #!/usr/bin/python3
"""
This module contains the startup-function and the MainWindow-class.
The MainWindow-class sets up the GUI and controls all its signals. All other
modules and clases are controlled from this class. The startup-function creates
the first instance of the GUI when the program starts.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf
from matplotlib.backends.backend_gtk3cairo import (FigureCanvasGTK3Cairo
as FigureCanvas)
from matplotlib.backends.backend_gtk3 import (NavigationToolbar2GTK3
as NavigationToolbar)
from matplotlib.cm import register_cmap
import mplstereonet
import numpy as np
import scipy.spatial as spatial
import webbrowser
import os
import csv
from matplotlib.lines import Line2D
import json
from collections import OrderedDict
#Internal imports
from .dataview_classes import (PlaneDataView, LineDataView,
FaultPlaneDataView, SmallCircleDataView,
EigenVectorView)
from .layer_view import LayerTreeView
from .layer_types import (PlaneLayer, FaultPlaneLayer, LineLayer,
SmallCircleLayer, EigenVectorLayer)
from .dialog_windows import (AboutDialog, StereonetProperties,
FileChooserParse, FileChooserExport,
FileChooserSave, FileChooserOpen)
from .layer_properties import LayerProperties
from .plot_control import PlotSettings
from .polar_axes import NorthPolarAxes
from .file_parser import FileParseDialog
from .rotation_dialog import RotationDialog
from .viridis import viridis
from .settings import AppSettings
from .i18n import i18n
_ = i18n().language().gettext
class MainWindow(object):
"""
The MainWindow-class handles the properties and signals of the GUI.
The class sets up the GUI and connects all signals. Most methods are
for individual functions of the GUI.
"""
    def __init__(self, builder, testing):
        """
        Initializes the main window and connects different functions.

        Initializes the GUI, connects all its sinals, and runs the
        Gtk-main-loop. An instance of the Gtk.Builder is required for init.
        An instance of the figure is created and added to the FigureCanvas.
        The global startup function enables the program to open another
        independent instance of the GUI.
        """
        global startup
        # Widget lookups from the Glade builder definition.
        self.main_window = builder.get_object("main_window")
        self.sw_plot = builder.get_object("sw_plot")
        self.sw_layer = builder.get_object("sw_layerview")
        self.sw_data = builder.get_object("sw_dataview")
        self.tb1 = builder.get_object("toolbar1")
        self.statbar = builder.get_object("statusbar")
        self.plot_menu = builder.get_object("menu_plot_views")
        self.builder = builder

        # Make the bundled viridis colormap available to matplotlib.
        register_cmap('viridis', viridis)

        context = self.tb1.get_style_context()
        context.add_class(Gtk.STYLE_CLASS_PRIMARY_TOOLBAR)

        #Clipboard
        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)

        #Set up default options class
        self.settings = PlotSettings(testing)
        self.change_night_mode()

        #Set up layer view and connect signals
        # Columns: visibility flag, type icon, label, layer object.
        self.layer_store = Gtk.TreeStore(bool, GdkPixbuf.Pixbuf, str, object)
        self.layer_view = LayerTreeView(self.layer_store)
        self.sw_layer.add(self.layer_view)

        #Connect signals of layer view
        self.layer_view.renderer_name.connect("edited", self.layer_name_edited)
        self.layer_view.renderer_activate_layer.connect("toggled",
                                                        self.on_layer_toggled)
        self.layer_view.connect("row-activated", self.layer_row_activated)
        self.select = self.layer_view.get_selection()
        self.select.connect("changed", self.layer_selection_changed)
        self.draw_features = False
        # Drag-and-drop signals used to move layers within the tree.
        self.layer_view.connect("drag-begin", self.drag_begin)
        self.layer_view.connect("drag-data-get", self.drag_data_get)
        self.layer_view.connect("drag-drop", self.drag_drop)
        self.layer_view.connect("drag-data-delete", self.drag_data_delete)
        self.layer_view.connect("drag-data-received", self.drag_data_received)
        self.layer_view.connect("drag-end", self.drag_end)

        #Set up the plot
        self.fig = self.settings.get_fig()
        self.canvas = FigureCanvas(self.fig)
        self.sw_plot.add_with_viewport(self.canvas)
        self.ax_stereo, self.ax_cbar = self.settings.get_stereonet()
        self.cbar = None
        self.inv = self.settings.get_inverse_transform()
        self.inv_rose = NorthPolarAxes.InvertedNorthPolarTransform()
        self.trans = self.settings.get_transform()
        # Current canvas layout; see the on_menuitem_*_activate handlers.
        self.view_mode = "stereonet"
        self.view_changed = False
        self.ax_rose = None
        self.ax_drose = None

        #Set up event-handlers
        self.set_up_fisher_menu()
        self.canvas.mpl_connect('motion_notify_event',
                                self.mpl_motion_event)
        self.canvas.mpl_connect('button_press_event',
                                self.mpl_canvas_clicked)
        self.redraw_plot()
        self.main_window.show_all()
    def set_up_fisher_menu(self):
        """
        Sets up and handles the signal of the Fisher Statistics popover.

        The popover contains an entry for the desired confidence (default is
        95) and a button to start the calculation. The calculation adds a
        smallcircle layer to the project.
        """
        def on_click(button, popovermenu):
            """
            Toggles the respective popovermenu.
            """
            if popovermenu.get_visible():
                popovermenu.hide()
            else:
                popovermenu.show_all()

        def add_fisher_confidence(signal, entry_conf, tb_fisher, pom_fisher):
            """
            Calculates the Fisher Confidence small circle.

            Checks whether all selected layers are linear layers. If True
            all data is collected and a small circle is added to the project.
            The size of the small circle corresponds to the confidence in the
            calculated direction that the user entered.
            """
            # Close the popover before running the calculation.
            on_click(tb_fisher, pom_fisher)
            selection = self.layer_view.get_selection()
            model, row_list = selection.get_selected_rows()
            confidence = float(entry_conf.get_text())
            if len(row_list) == 0:
                return

            #Check if all selected layers are linear layers.
            only_lines = True
            for row in row_list:
                lyr_obj = model[row][3]
                if lyr_obj.get_layer_type() != "line":
                    only_lines = False
            if only_lines is False:
                return

            # Pool dip-direction/dip values of all selected layers.
            total_dipdir = []
            total_dip = []
            for row in row_list:
                lyr_obj = model[row][3]
                store = lyr_obj.get_data_treestore()
                dipdir, dip, sense = self.parse_lines(store)
                for x, y in zip(dipdir, dip):
                    total_dipdir.append(x)
                    total_dip.append(y)

            vector, stats = mplstereonet.find_fisher_stats(total_dip, total_dipdir, conf=confidence)
            new_store, new_lyr_obj = self.add_layer_dataset("smallcircle")
            new_lyr_obj.set_label("Fisher Confidence: {} %".format(confidence))
            self.add_linear_feature(new_store, vector[1], vector[0], stats[1])
            self.redraw_plot()

        # Build the popover UI: a confidence entry plus a calculate button.
        tb_fisher = self.builder.get_object("toolbutton_fisher")
        pom_fisher = Gtk.Popover(relative_to=tb_fisher)
        lb_fisher = Gtk.ListBox()
        pom_fisher.add(lb_fisher)

        row_conf = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0,
                       border_width=10)
        row_conf.add(hbox)
        label_conf = Gtk.Label(_("Fisher Confidence"), xalign=0)
        hbox.pack_start(label_conf, True, True, 3)
        entry_conf = Gtk.Entry(width_chars=3, max_width_chars=3, text="95")
        hbox.pack_start(entry_conf, False, False, 3)
        lb_fisher.add(row_conf)

        btn_calc = Gtk.Button(_("Calculate"))
        row_btn = Gtk.ListBoxRow()
        box = Gtk.Box()
        box.pack_start(btn_calc, True, True, 0)
        row_btn.add(box)
        lb_fisher.add(row_btn)

        btn_calc.connect("clicked", add_fisher_confidence, entry_conf, tb_fisher, pom_fisher)
        tb_fisher.connect("clicked", on_click, pom_fisher)
    def copy_layer(self):
        """
        Copies the contents of a layer and all its children.

        This method is called by the drag-and-drop and copy-paste functions.
        It returns the data as JSON.
        """
        tree_selection = self.layer_view.get_selection()
        store, itr = tree_selection.get_selected_rows()
        model = self.layer_view.get_model()
        # NOTE(review): despite its name "itr" is the list of selected
        # paths here; only the first selected row (and its children)
        # is serialized.
        path = itr[0]
        path_str = str(path)
        itr = store.get_iter(path)

        copy = {}
        copy["filetype"] = "InnStereo layer 1.0"
        copy["layers"] = []

        def append_layer(lyr_obj, path_str, label):
            """
            Appends a layer to the serialization dictionary.

            Receives a store, iter and path_str. Appends the path, properties
            and data to the 'layers' list of the dictionary. For folders it
            appends the path, the folder-properties and an empty list (So that
            the destination can use iterators also for folders).
            """
            #The layer includes the layer and children as
            #[[path, properties, data],...]
            if lyr_obj is None:
                #No lyr_obj means that this is a folder
                folder_props = {"type": "folder", "label": label}
                folder_props = OrderedDict(sorted(folder_props.items()))
                copy["layers"].append([path_str, folder_props, []])
            else:
                properties = lyr_obj.get_properties()
                data = lyr_obj.return_data()
                copy["layers"].append([path_str, properties, data])

        def iterate_over_store(model, path, itr, start_path):
            """
            Iterates over the whole TreeStore and appends all draged layers.

            The function iterates over the whole TreeStore, but uses the
            path to identify the dragged layer and its children. Calls the
            append function on each these layers.
            """
            path_str = str(path)
            lyr_obj = store[itr][3]
            label = store[itr][2]
            # A child's path string starts with the parent's path string,
            # so this picks up the selected layer and all descendants.
            if path_str.startswith(start_path) == True:
                append_layer(lyr_obj, path_str, label)

        self.layer_store.foreach(iterate_over_store, path_str)
        # Sort keys for a stable serialization order.
        copy = OrderedDict(sorted(copy.items()))
        data = json.dumps(copy)
        return data
    def insert_layer_data(self, data_dict, drop_info=None):
        """
        Inserts layers into a TreeStore.

        This method can be used for inserting data at a certain location
        or appending it and the end.
        """
        def drop_layer(lyr_obj_new, lyr_dict, drop_iter, drop_position):
            # Inserts the first dropped row relative to the drop target.
            # NOTE(review): "drop_lyr_obj" is read from the enclosing
            # scope and is only bound when drop_info is not None.
            if lyr_obj_new == None:
                lyr_pixbuf = self.settings.get_folder_icon()
                lyr_label = lyr_dict["label"]
            else:
                lyr_obj_new.set_properties(lyr_dict)
                lyr_pixbuf = lyr_obj_new.get_pixbuf()
                lyr_label = lyr_obj_new.get_label()
            if drop_lyr_obj is None:
                #0=Before, 1=After, 2=INTO_OR_BEFORE, 3=INTO_OR_AFTER
                if drop_position == Gtk.TreeViewDropPosition.BEFORE:
                    ins_itr = self.layer_store.insert_before(None, drop_iter,
                                  [True, lyr_pixbuf, lyr_label, lyr_obj_new])
                elif drop_position == Gtk.TreeViewDropPosition.AFTER:
                    ins_itr = self.layer_store.insert_after(None, drop_iter,
                                  [True, lyr_pixbuf, lyr_label, lyr_obj_new])
                else:
                    # Dropped onto a folder: insert as its first child.
                    ins_itr = self.layer_store.insert_after(drop_iter, None,
                                  [True, lyr_pixbuf, lyr_label, lyr_obj_new])
            else:
                if drop_position == Gtk.TreeViewDropPosition.BEFORE:
                    ins_itr = self.layer_store.insert_before(None, drop_iter,
                                  [True, lyr_pixbuf, lyr_label, lyr_obj_new])
                else:
                    ins_itr = self.layer_store.insert_after(None, drop_iter,
                                  [True, lyr_pixbuf, lyr_label, lyr_obj_new])
            return ins_itr

        def insert_layer(lyr_obj_new, lyr_dict, ins_iter):
            # Appends a row as a child of ins_iter.
            if lyr_obj_new == None:
                lyr_pixbuf = self.settings.get_folder_icon()
                lyr_label = lyr_dict["label"]
            else:
                lyr_obj_new.set_properties(lyr_dict)
                lyr_pixbuf = lyr_obj_new.get_pixbuf()
                lyr_label = lyr_obj_new.get_label()
            ins_itr = self.layer_store.insert_before(ins_iter, None,
                          [True, lyr_pixbuf, lyr_label, lyr_obj_new])
            return ins_itr

        if drop_info is not None:
            #Insert the row at the drop position
            insert_rows = True
            drop_path, drop_position = drop_info[0], drop_info[1]
            drop_iter = self.layer_store.get_iter(drop_path)
            drop_row = self.layer_store[drop_iter]
            drop_lyr_obj = drop_row[3]
        else:
            #Append the row to the TreeStore
            insert_rows = False

        for key, layer in enumerate(data_dict["layers"]):
            # layer == [path_string, properties_dict, data_rows]
            split_path = layer[0].split(":")
            lyr_dict = layer[1]
            lyr_data = layer[2]
            lyr_obj_new, lyr_store, lyr_view = self.create_layer(lyr_dict["type"])
            if lyr_obj_new is not None:
                lyr_obj_new.set_properties(lyr_dict)
                lyr_pixbuf = lyr_obj_new.get_pixbuf()
                lyr_label = lyr_obj_new.get_label()
            else:
                lyr_pixbuf = self.settings.get_folder_icon()
                lyr_label = lyr_dict["label"]

            if key == 0 and insert_rows == True:
                # First layer: place it at the drop location.
                cutoff = len(layer[0].split(":"))
                ins_itr = drop_layer(lyr_obj_new, lyr_dict, drop_iter, drop_position)
                iter_dict = {0: ins_itr}
            elif key == 0 and insert_rows == False:
                cutoff = len(layer[0].split(":"))
                ins_itr = self.layer_store.append(None, [True, lyr_pixbuf, lyr_label, lyr_obj_new])
                iter_dict = {0: ins_itr}
            else:
                # Children: their path depth relative to the first layer
                # selects the previously inserted parent iterator.
                new_path = split_path[cutoff:]
                path_len = len(new_path)
                ins_itr = iter_dict[path_len-1]
                itr = insert_layer(lyr_obj_new, lyr_dict, ins_itr)
                iter_dict[path_len] = itr

            for f in lyr_data:
                if lyr_dict["type"] == "faultplane":
                    #Passing a list or tuple to the add feature function would be better.
                    self.add_feature(lyr_dict["type"], lyr_store, f[0], f[1], f[2], f[3], f[4])
                else:
                    self.add_feature(lyr_dict["type"], lyr_store, f[0], f[1], f[2])

        if insert_rows == False:
            self.redraw_plot()
    def drag_begin(self, treeview, context):
        """
        Drag begin signal of the layer view. Currently does nothing.

        This signal could be used to set up a e.g. drag icon.
        """
        pass
    def drag_data_get(self, treeview, context, selection, info, time):
        """
        Gets the data from the drag source. Serializes the data to JSON.

        Iterates over the draged layer and all its children. Serializes the
        path, properties and data. Encodes into JSON and sens it to the
        drag destinations.
        """
        data = self.copy_layer()
        # 8 = bits per data unit (a plain byte stream).
        selection.set(selection.get_target(), 8, data.encode())
    def drag_drop(self, treeview, context, selection, info, time):
        """
        Signal emitted when a layer is droped. Does nothing at the moment.
        """
        # NOTE(review): GTK documents the "drag-drop" callback signature
        # as (widget, context, x, y, time); harmless while the body is
        # empty, but verify before adding logic here.
        pass
    def drag_data_received(self, treeview, context, x, y, selection, info, time):
        """
        Called when data is received at the drop location. Moves the data.

        The received JSON is decoded and the validity checked. Then the layers
        are recreated and inserted at the drop location.
        """
        drop_info = self.layer_view.get_dest_row_at_pos(x, y)
        data = selection.get_data().decode()
        decoded = json.loads(data)
        filetype = decoded["filetype"]
        if filetype != "InnStereo layer 1.0":
            print("Not a valid layer")
            return
        self.insert_layer_data(decoded, drop_info)
        # Second True asks the drag source to delete the original rows
        # (move semantics).
        context.finish(True, True, time)
    def drag_end(self, treeview, context):
        """
        Signal when drag of a layer is complete. Redraws the plot.
        """
        self.redraw_plot()
    def drag_data_delete(self, treeview, context):
        """
        Signal is emitted when data is deleted. Does nothing at the moment.
        """
        pass
    def on_toolbutton_cut_clicked(self, toolbutton):
        """
        Cuts the selected layer.

        The data is copied into the Gdk.Clipboard and then removed from the
        TreeStore.
        """
        selection = self.layer_view.get_selection()
        model, row_list = selection.get_selected_rows()
        # Nothing selected: nothing to cut.
        if len(row_list) == 0:
            return
        data = self.copy_layer()
        self.clipboard.set_text(data, -1)
        self.delete_layer(model, row_list)
    def on_toolbutton_copy_clicked(self, toolbutton):
        """
        Copies the selected layer data into the Gdk.Clipboard.

        The data is returned by the copy_layer method. It is returned as
        JSON data.
        """
        selection = self.layer_view.get_selection()
        model, row_list = selection.get_selected_rows()
        if len(row_list) == 0:
            return
        data = self.copy_layer()
        self.clipboard.set_text(data, -1)
        # Returned mainly for testability.
        return data
def on_toolbutton_paste_clicked(self, toolbutton):
"""
Pastes the layer-data.
Copies the data from the clipboard. Checks whether it is valid JSON.
Checks whether it is a valid layer and then adds it to the project.
"""
data = self.clipboard.wait_for_text()
try:
parse = json.loads(data)
except:
return
if parse["filetype"] != "InnStereo layer 1.0":
return
else:
self.insert_layer_data(parse, drop_info=None)
def on_menuitem_stereo_activate(self, radiomenuitem):
# pylint: disable=unused-argument
"""
Switches to the stereonet-only view.
Triggered from the menu bar. If the canvas is in a different view mode
it switches to stereonet-only.
"""
if self.view_mode is not "stereonet":
self.view_changed = True
self.view_mode = "stereonet"
self.redraw_plot()
def on_menuitem_stereo_rose_activate(self, radiomenuitem):
# pylint: disable=unused-argument
"""
Switches to the stereonet and rose-diagram view.
Triggered from the menu bar. If the canvas is in a different view mode
it will be switched to a combined stereonet and rose diagram view.
"""
if self.view_mode is not "stereo-rose":
self.view_changed = True
self.view_mode = "stereo-rose"
self.redraw_plot()
def on_menuitem_stereo_two_rose_activate(self, radiomenuitem):
"""
"""
if self.view_mode is not "stereo-two-rose":
self.view_changed = True
self.view_mode = "stereo-two-rose"
self.redraw_plot()
def on_menuitem_rose_view_activate(self, radiomenuitem):
# pylint: disable=unused-argument
"""
Switches to the rose-diagram-only view.
Triggered from the menu bar. If the canvas is in a different view mode
it will be switched to a rose diagram only view.
"""
if self.view_mode is not "rose":
self.view_changed = True
self.view_mode = "rose"
self.redraw_plot()
def on_menuitem_pt_view_activate(self, radiomenuitem):
# pylint: disable=unused-argument
"""
Switches to the paleostress view.
Triggered from the menu bar. If the canvas is in a different view mode
it switches to the PT-View.
"""
if self.view_mode is not "pt":
self.view_changed = True
self.view_mode = "pt"
self.redraw_plot()
    def change_night_mode(self):
        """
        Changes the night mode.

        Gets the current setting and applies it to the window.
        """
        state = self.settings.get_night_mode()
        # Toggle the GTK-wide dark-theme preference for this process.
        Gtk.Settings.get_default().set_property("gtk-application-prefer-dark-theme", state)
        self.main_window.show_all()
    def on_toolbutton_settings_clicked(self, toolbutton):
        """
        Opens the window where the GSettings can be set for Innstereo.

        An instance of the window is created and then displayed.
        """
        set_win = AppSettings(self.main_window)
        set_win.run()
def on_toolbutton_eigenvector_clicked(self, widget):
# pylint: disable=unused-argument
"""
Calculates the eigenvectors and eigenvalues of one or more layers.
Triggered when the user calls the calculation. It checks if all the
selected layers are either planes or linear-layers. If different
layers are selected the calculation is aborted. A successful
calculation adds a new eigenvector-layer.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
values = []
#Check if all selected layers are the same
layers_equal = True
layer_list = []
for row in row_list:
lyr_obj = model[row][3]
if lyr_obj is None:
return
else:
layer_list.append(lyr_obj.get_layer_type())
for a in layer_list:
for b in layer_list:
if a is not b:
layers_equal = False
if layers_equal == False:
self.statbar.push(1, ("Please select only layers of the same type!"))
return
def evaluate_planes():
total_strike = []
total_dip = []
for row in row_list:
lyr_obj = model[row][3]
strike, dipdir, dip = self.parse_planes(
lyr_obj.get_data_treestore())
for x in strike:
total_strike.append(x)
for y in dip:
total_dip.append(y)
dip, dipdir, values = mplstereonet.eigenvectors(total_strike, total_dip)
return dip, dipdir, values
def evaluate_lines():
total_dipdir = []
total_dip = []
for row in row_list:
lyr_obj = model[row][3]
dipdir, dip, sense = \
self.parse_lines(lyr_obj.get_data_treestore())
for x in dipdir:
total_dipdir.append(x)
for y in dip:
total_dip.append(y)
dip, dipdir, values = mplstereonet.eigenvectors(total_dip,
total_dipdir,
measurement="lines")
return dip, dipdir, values
#Check how data should be interpreted:
if layer_list[0] == "plane":
dip, dipdir, values = evaluate_planes()
elif layer_list[0] == "line":
dip, dipdir, values = evaluate_lines()
else:
self.statbar.push(1, ("Please select only plane or line layers!"))
return
#Normalize to 1
values = values/np.sum(values)
store, new_lyr_obj = self.add_layer_dataset("eigenvector")
self.add_eigenvector_feature(store, dipdir[0], dip[0], values[0])
self.add_eigenvector_feature(store, dipdir[1], dip[1], values[1])
self.add_eigenvector_feature(store, dipdir[2], dip[2], values[2])
self.redraw_plot()
def on_toolbutton_rotate_layer_clicked(self, toolbutton):
# pylint: disable=unused-argument
"""
Open the data rotation dialog.
If one or more layers are selected a instance of the data-rotation
dialog is initialized and the selected rows are passed to it.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
if len(row_list) == 0:
self.statbar.push(1, ("Please select layers to rotate!"))
return
def parse_layers(model, path, itr, data, key):
line = model[path]
data[key][3].append([line[0], line[1], line[2]])
data_rows = []
for row in row_list:
lyr_obj = model[row][3]
data_rows.append(lyr_obj)
rotate_dialog = RotationDialog(self.main_window, self.settings,
data_rows, self.add_layer_dataset,
self.add_feature, self.redraw_plot)
rotate_dialog.run()
    def on_toolbutton_new_project_clicked(self, widget):
        # pylint: disable=unused-argument
        """
        Opens a new and indenpendent window of the GUI.

        Triggered from the GUI. When the "new project"-button is pressed
        this function runs the startup function and creates a new and
        independent instance of the GUI.
        """
        startup()
    def on_menuitem_new_window_activate(self, widget):
        # pylint: disable=unused-argument
        """
        Opens a new and indenpendent window of the GUI.

        Triggered from the menu bar: "File -> New". Opens a new independent
        window by calling the global startup function.
        """
        startup()
    def on_toolbutton_poles_to_lines_clicked(self, widget):
        # pylint: disable=unused-argument
        """
        Copies the poles of a plane-layer into a new line-layer.

        Checks if selected layers are planes or faultplanes. Copies the
        dip-direction - dip data into a line-dataset. If many layers are
        selected the data will be merged into one layer.
        """
        selection = self.layer_view.get_selection()
        model, row_list = selection.get_selected_rows()

        def iterate_over_data(model, path, itr, n):
            """
            Copies data to new layer.

            Receives a model, path and itr of a layer, plus the datastore
            of the new layer. Converts the plane orientation into a pole
            orientation and adds it to the new layer.
            """
            r = model[path]
            # Pole of a plane: dip-direction + 180, dip becomes 90 - dip.
            self.add_linear_feature(n, 180 + r[0], 90 - r[1])

        # Abort when a folder (no layer object) or a line layer is
        # among the selection.
        for row in row_list:
            lyr_obj = model[row][3]
            if lyr_obj is None:
                return
            else:
                layer_type = lyr_obj.get_layer_type()
                if layer_type == "line":
                    return

        #n = new datastore
        n, new_lyr_obj = self.add_layer_dataset("line")

        for row in row_list:
            lyr_obj = model[row][3]
            datastore = lyr_obj.get_data_treestore()
            datastore.foreach(iterate_over_data, n)

        self.redraw_plot()
    def on_toolbutton_save_clicked(self, widget, testing=False):
        # pylint: disable=unused-argument
        """
        Triggered from the GUI. Saves the project.

        Iterates over all layers and stores the data in a dictionary. Passes
        the dictionary to the FileChooserSave dialog, which handles writing
        the file to the harddisk. The JSON dump is returned so tests can
        inspect it without opening the dialog (testing=True).
        """
        copy = {}
        copy["filetype"] = "InnStereo data file 1.0"
        copy["settings"] = self.settings.get_properties()
        copy["layers"] = []

        def append_layer(lyr_obj, path_str, label):
            """
            Appends a layer to the serialization dictionary.

            Receives a store, iter and path_str. Appends the path, properties
            and data to the 'layers' list of the dictionary. For folders it
            appends the path, the folder-properties and an empty list (So that
            the destination can use iterators also for folders).
            """
            #The layer includes the layer and children as
            #[[path, properties, data],...]
            if lyr_obj is None:
                #No lyr_obj means that this is a folder
                folder_props = {"type": "folder", "label": label}
                folder_props = OrderedDict(sorted(folder_props.items()))
                copy["layers"].append([path_str, folder_props, []])
            else:
                properties = lyr_obj.get_properties()
                data = lyr_obj.return_data()
                copy["layers"].append([path_str, properties, data])

        def iterate_over_store(model, path, itr):
            """
            Iterates over the whole TreeStore and appends all layers.

            The function iterates over the whole TreeStore and calls the append
            function on each layer.
            """
            path_str = str(path)
            lyr_obj = model[itr][3]
            label = model[itr][2]
            append_layer(lyr_obj, path_str, label)

        self.layer_store.foreach(iterate_over_store)
        # Sort keys for a stable serialization order.
        copy = OrderedDict(sorted(copy.items()))
        dump = json.dumps(copy)
        if testing == False:
            dlg = FileChooserSave(self.main_window, dump)
            dlg.run()
        return dump
    def on_toolbutton_open_clicked(self, toolbutton):
        # pylint: disable=unused-argument
        """
        Triggered from the GUI. Opens a saved project.

        Runs the FileChooserOpen dialog. The dialog calls the open_project
        function if a file is opened.
        """
        dlg = FileChooserOpen(self.main_window, self.open_project)
        dlg.run()
def open_project(self, project_file):
"""
Opens a saved project. Adds all the saved layers to the current window
The opened file is passed from the FileChooserOpen dialog. The file
is read and then the json is parsed. The function then checks if
the file is valid. Then each layer is added to the project. For each
layer the saved properties are set and all the data rows are loaded.
"""
with open(project_file, "r") as prj_file:
read_data = prj_file.read()
parse = json.loads(read_data)
if parse["filetype"] != "InnStereo data file 1.0":
print("Not a valid InnStereo data file")
self.settings.set_properties(parse["settings"])
def insert_layer(lyr_obj_new, lyr_dict, ins_iter):
if lyr_obj_new == None:
lyr_pixbuf = self.settings.get_folder_icon()
lyr_label = lyr_dict["label"]
else:
lyr_obj_new.set_properties(lyr_dict)
lyr_pixbuf = lyr_obj_new.get_pixbuf()
lyr_label = lyr_obj_new.get_label()
ins_itr = self.layer_store.insert_before(ins_iter, None,
[True, lyr_pixbuf, lyr_label, lyr_obj_new])
return ins_itr
def create_and_insert(ins_itr, lyr_dict):
lyr_obj_new, lyr_store, lyr_view = self.create_layer(lyr_dict["type"])
ins_itr = insert_layer(lyr_obj_new, lyr_dict, ins_itr)
if lyr_obj_new is not None:
lyr_obj_new.set_properties(lyr_dict)
return ins_itr, lyr_store
iter_dict = {0: None}
for layer in parse["layers"]:
split_path = layer[0].split(":")
path_len = len(split_path)
lyr_dict = layer[1]
features = layer[2]
#The last path length is assigned to the dictionary
#If the next layer has a longer path it will use the
#previous entry as parent. It is not overwritten, which
#produces a depth-first iteration.
ins_itr = iter_dict[path_len-1]
itr, lyr_store = create_and_insert(ins_itr, lyr_dict)
iter_dict[path_len] = itr
for f in features:
if lyr_dict["type"] == "faultplane":
#Passing a list or tuple to the add feature function would be better.
self.add_feature(lyr_dict["type"], lyr_store, f[0], f[1], f[2], f[3], f[4])
else:
self.add_feature(lyr_dict["type"], lyr_store, f[0], f[1], f[2])
self.redraw_plot()
    def on_toolbutton_show_table_clicked(self, widget):
        # pylint: disable=unused-argument
        """
        Opens dialog to view the data in a table.

        __!!__ Maybe implement sorting in this dialog?
        __!!__ Not implemented yet: this handler is currently a no-op stub.
        """
        pass
def delete_layer(self, model, row_list):
"""
Deletes all the passed layers and their children.
Expects a model and list of rows. Deletes the rows and all their
children.
__!!__ Currently has no warning message. What happens to data?
"""
for row in reversed(row_list):
itr = model.get_iter(row)
model.remove(itr)
selection = self.layer_view.get_selection()
selection.unselect_all()
self.redraw_plot()
def on_toolbutton_delete_layer_clicked(self, widget):
# pylint: disable=unused-argument
"""
Deltes the currently selected layer(s).
Triggered when the "remove layers" toolbutton is pressed. Deletes all
selected layers.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
self.delete_layer(model, row_list)
    def on_toolbutton_plot_properties_clicked(self, widget):
        # pylint: disable=unused-argument
        """
        Opens the plot-properties dialog.

        Triggered when the toolbutton is pressed. Creates an instance of the
        StereonetProperties class, which is a Gtk DialogWindow, and runs it.
        The dialog receives the redraw and night-mode callbacks so changes
        can be applied immediately.
        """
        plot_properties = StereonetProperties(self.settings, self.redraw_plot,
                                              self.main_window,
                                              self.change_night_mode)
        plot_properties.run()
    def on_toolbutton_save_figure_clicked(self, widget):
        # pylint: disable=unused-argument
        """
        Opens a dialog to save the figure to a specified location and format.

        Opens the matplotlib dialog window that allows saving the current
        figure in a specified location, name and file format.
        """
        #The Matplotlib navigation toolbar already implements the save dialog.
        nav = NavigationToolbar(self.canvas, self.main_window)
        nav.save_figure()
def layer_view_clicked(self, treeview, button):
# pylint: disable=unused-argument
"""
Unselects all layers if the layer-view is clicked.
Called when one clicks with the mouse on the layer-treeview.
Unselects all selected layers.
"""
selection = self.layer_view.get_selection()
selection.unselect_all()
def on_toolbutton_draw_features_toggled(self, widget):
# pylint: disable=unused-argument
"""
Toggles if featues can be drawn by clicking on the canvas.
Activated when the toggle button is pressed. When self.draw_features
is True then clicking on the canvas with an active layer will draw
a features at that point.
"""
if self.draw_features is False:
self.draw_features = True
else:
self.draw_features = False
self.update_statusbar()
def on_toolbutton_best_plane_clicked(self, widget):
# pylint: disable=unused-argument
"""
Finds the optimal plane for a set of linears.
Iterates over all selected rows and collects the data. Finds the
optimal plane that can be fitted to the data.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
#Check if all selected layers are planes or faultplanes.
only_linears = True
for row in row_list:
lyr_obj = model[row][3]
if lyr_obj.get_layer_type() == "plane":
only_linears = False
if only_linears is False:
return
total_dipdir = []
total_dip = []
for row in row_list:
lyr_obj = model[row][3]
dipdir, dip, sense = self.parse_lines(
lyr_obj.get_data_treestore())
for x in dipdir:
total_dipdir.append(x)
for y in dip:
total_dip.append(y)
fit_strike, fit_dip = mplstereonet.fit_girdle(total_dip, total_dipdir,
measurement="lines")
store, new_lyr_obj = self.add_layer_dataset("plane")
self.add_planar_feature(store, fit_strike + 90, fit_dip)
self.redraw_plot()
    def on_toolbutton_plane_intersect_clicked(self, widget):
        # pylint: disable=unused-argument
        """
        Calculates the best fitting intersect for the selected planes.

        This method gathers all the dip-direction and dips of all selected
        layers. If linear layers are also selected nothing will be done.
        The best-fit intersection is added to the project as a new linear
        layer.
        """
        selection = self.layer_view.get_selection()
        model, row_list = selection.get_selected_rows()
        if len(row_list) == 0:
            return
        #Check if all selected layers are planes or faultplanes.
        only_planes = True
        for row in row_list:
            lyr_obj = model[row][3]
            if lyr_obj.get_layer_type() == "line":
                only_planes = False
        if only_planes is False:
            return
        total_dipdir = []
        total_dip = []
        #Iterate over layers and rows, gather poles
        for row in row_list:
            lyr_obj = model[row][3]
            strike, dipdir, dip = self.parse_planes(
                                    lyr_obj.get_data_treestore())
            #Each plane is converted to its pole: strike + 270 is the pole
            #azimuth, 90 - dip its plunge.
            for x in strike:
                total_dipdir.append(270 + x)
            for y in dip:
                total_dip.append(90 - y)
        #The gathered poles are plotted directly onto the stereonet.
        self.ax_stereo.line(total_dip, total_dipdir)
        fit_strike, fit_dip = mplstereonet.fit_girdle(total_dip, total_dipdir,
                                    measurement="lines")
        store, new_lyr_obj = self.add_layer_dataset("line")
        #The pole of the fitted girdle is the common intersection linear.
        self.add_linear_feature(store, fit_strike + 270, 90 - fit_dip)
        self.redraw_plot()
def on_toolbutton_linears_to_planes_clicked(self, toolbutton):
# pylint: disable=unused-argument
"""
Finds the plane normal to the selected linears and adds them as planes.
This method calculates the normal planes for all selected linear
layers and adds them as a new plane dataset. This can be used to
calculate the cross-section plane of a set of fold axis.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
if len(row_list) == 0:
return
#Check if all selected layers are linear layers.
only_lines = True
for row in row_list:
lyr_obj = model[row][3]
if lyr_obj.get_layer_type() == "plane":
only_lines = False
elif lyr_obj.get_layer_type() == "faultplane":
only_lines = False
if only_lines is False:
return
store, new_lyr_obj = self.add_layer_dataset("plane")
for row in row_list:
lyr_obj = model[row][3]
strike, dipdir, sense = self.parse_lines(
lyr_obj.get_data_treestore())
for strike, dipdir in zip(strike, dipdir):
self.add_linear_feature(store, strike + 180, 90 - dipdir)
self.redraw_plot()
def on_toolbutton_mean_vector_clicked(self, toolbutton):
"""
Calculates the mean vector and adds it to the project.
Parses line-layers and adds up all the values. then the mean vector
is calculated and added to the project. The legend will show the
dip-direction/dip of the mean vector and the coefficient of
determination (r-value).
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
if len(row_list) == 0:
return
#Check if all selected layers are linear layers.
only_lines = True
for row in row_list:
lyr_obj = model[row][3]
if lyr_obj.get_layer_type() != "line":
only_lines = False
if only_lines is False:
return
total_dipdir = []
total_dip = []
for row in row_list:
lyr_obj = model[row][3]
store = lyr_obj.get_data_treestore()
dipdir, dip, sense = self.parse_lines(store)
for x, y in zip(dipdir, dip):
total_dipdir.append(x)
total_dip.append(y)
vector, r_value = mplstereonet.find_mean_vector(dip, dipdir)
new_store, new_lyr_obj = self.add_layer_dataset("eigenvector")
new_lyr_obj.set_label("Mean Vector")
self.add_linear_feature(new_store, vector[1], vector[0], r_value)
self.redraw_plot()
    def convert_lonlat_to_dipdir(self, lon, lat):
        """
        Converts lat-lon data to dip-direction and dip.

        Expects a longitude and a latitude value (in radians, as used by
        mplstereonet — TODO confirm against callers). The measurement is
        forward transformed into stereonet-space. Then the azimuth
        (dip-direction) and dipping angle are calculated. Returns two
        values: dip-direction and dip (both in degrees).
        """
        #The longitude and latitude have to be forward-transformed to get
        #the corect azimuth angle
        xy = np.array([[lon, lat]])
        xy_trans = self.trans.transform(xy)
        x = float(xy_trans[0,0:1])
        y = float(xy_trans[0,1:2])
        #atan2(x, y) measures the angle clockwise from north (the y-axis).
        alpha = np.arctan2(x, y)
        alpha_deg = np.degrees(alpha)
        #Normalize to the 0-360 degree range.
        if alpha_deg < 0:
            alpha_deg += 360
        #Longitude and Latitude don't need to be converted for rotation.
        #The correct dip is the array[1] value once the vector has been
        #rotated in north-south position.
        array = mplstereonet.stereonet_math._rotate(np.degrees(lon),
                                                    np.degrees(lat),
                                                    alpha_deg * (-1))
        gamma = float(array[1])
        gamma_deg = 90 - np.degrees(gamma)
        #If the longitude is larger or small than pi/2 the measurment lies
        #on the upper hemisphere and needs to be corrected.
        if lon > (np.pi / 2) or lon < (-np.pi / 2):
            alpha_deg = alpha_deg + 180
        return alpha_deg, gamma_deg
    def rotate_data(self, raxis, raxis_angle, dipdir, dip):
        """
        Rotates a measurement around a rotation axis a set number of degrees.

        Expects a rotation-axis (as [azimuth, dip]), a rotation-angle, a
        dip-direction and a dip angle. The measurement is converted to
        lonlat and rotated in five steps: two rotations bring the rotation
        axis into east-west position, the actual rotation is applied, and
        the two setup rotations are undone in reverse order. Returns the
        rotated measurement as dip-direction and dip.
        """
        lonlat = mplstereonet.line(dip, dipdir)
        #Rotation around x-axis until rotation-axis azimuth is east-west
        rot1 = (90 - raxis[0])
        lon1 = np.degrees(lonlat[0])
        lat1 = np.degrees(lonlat[1])
        lon_rot1, lat_rot1 = mplstereonet.stereonet_math._rotate(lon1, lat1,
                                                      theta=rot1, axis="x")
        #Rotation around z-axis until rotation-axis dip is east-west
        rot2 = -(90 - raxis[1])
        lon2 = np.degrees(lon_rot1)
        lat2 = np.degrees(lat_rot1)
        lon_rot2, lat_rot2 = mplstereonet.stereonet_math._rotate(lon2, lat2,
                                                      theta=rot2, axis="z")
        #Rotate around the x-axis for the specified rotation:
        rot3 = raxis_angle
        lon3 = np.degrees(lon_rot2)
        lat3 = np.degrees(lat_rot2)
        lon_rot3, lat_rot3 = mplstereonet.stereonet_math._rotate(lon3, lat3,
                                                      theta=rot3, axis="x")
        #Undo the z-axis rotation
        rot4 = -rot2
        lon4 = np.degrees(lon_rot3)
        lat4 = np.degrees(lat_rot3)
        lon_rot4, lat_rot4 = mplstereonet.stereonet_math._rotate(lon4, lat4,
                                                      theta=rot4, axis="z")
        #Undo the x-axis rotation
        rot5 = -rot1
        lon5 = np.degrees(lon_rot4)
        lat5 = np.degrees(lat_rot4)
        lon_rot5, lat_rot5 = mplstereonet.stereonet_math._rotate(lon5, lat5,
                                                      theta=rot5, axis="x")
        #Convert the final lonlat position back to dip-direction/dip.
        dipdir5, dip5 = self.convert_lonlat_to_dipdir(lon_rot5, lat_rot5)
        return dipdir5, dip5
def on_toolbutton_ptaxis_clicked(self, toolbutton):
"""
Calculates the PT-Axis of a faultplane, and add adds them to the project
Triggered from the toolbar. One faultplane layer has to be selected.
Iterates over the rows and calculates the p-, t, and b-axis for each
of them.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
if len(row_list) == 0:
return
if len(row_list) > 1:
return
row = row_list[0]
lyr_obj = model[row][3]
lyr_type = lyr_obj.get_layer_type()
if lyr_type != "faultplane":
return
def iterate_over_data(model, path, itr, pbt_store):
"""
Iterates over the faultplane and adds the pt-axis for each row.
For each row the pole-linear-layer is calculated. The pole of
that layer is the rotation axis and the b-axis. The linear is
then rotated for the p-axis and t-axis.
"""
drow = model[path]
p_store = pbt_store[0]
b_store = pbt_store[1]
t_store = pbt_store[2]
fit_strike, fit_dip = mplstereonet.fit_girdle(
[float(drow[3]), 90 - float(drow[1])],
[float(drow[2]), float(drow[0]) + 180],
measurement="lines")
#Plane between pole and linear
self.ax_stereo.plane(fit_strike, fit_dip)
#Rotation axis is pole of pole-linear-plane
raxis = [fit_strike - 90, 90 - fit_dip]
#B-Axis is rotation axis
self.add_linear_feature(b_store, raxis[0], raxis[1])
#Rotate 30° to P-axis
if drow[4] == "dn" or drow[4] == "dex":
rot = 30
else:
rot = -30
p_dipdir, p_dip = self.rotate_data(raxis, rot, drow[2], drow[3])
self.add_linear_feature(p_store, p_dipdir, p_dip)
#Rotate 30°+120=150 to T-axis
if drow[4] == "dn" or drow[4] == "dex":
rot = -60
else:
rot = 60
t_dipdir, t_dip = self.rotate_data(raxis, rot, drow[2], drow[3])
self.add_linear_feature(t_store, t_dipdir, t_dip)
p_store, p_lyr_obj = self.add_layer_dataset("line")
p_lyr_obj.set_marker_fill("#ff0000")
p_lyr_obj.set_marker_fill("#ff0000")
p_lyr_obj.set_label("P-Axis")
b_store, b_lyr_obj = self.add_layer_dataset("line")
b_lyr_obj.set_marker_fill("#ffffff")
b_lyr_obj.set_marker_style("s")
b_lyr_obj.set_label("B-Axis")
t_store, t_lyr_obj = self.add_layer_dataset("line")
t_lyr_obj.set_marker_fill("#0000ff")
t_lyr_obj.set_marker_style("^")
t_lyr_obj.set_label("T-Axis")
pbt_store = [p_store, b_store, t_store]
lyr_store = lyr_obj.get_data_treestore()
lyr_store.foreach(iterate_over_data, pbt_store)
self.redraw_plot()
def layer_row_activated(self, treeview, path, column):
"""
Double clicking a layer, opens the layer-property dialog.
Excecutes when a treeview row is double-clicked. This passes the
treeview-object, the path (or row) as an integer and the
TreeViewColumn-object to this function.
"""
lyr_obj = self.layer_store[path][3]
if lyr_obj is not None:
layer_prop = LayerProperties(lyr_obj, self.redraw_plot, self.main_window)
layer_prop.run()
def on_toolbutton_layer_properties_clicked(self, toolbutton, testing=False):
"""
Triggered when the toolbutton for layer properties is pressed.
Checks if only one layer is selected. If more or less layers are
selected a warning is displayed in the statusbar.
"""
selection = self.layer_view.get_selection()
model, row_list = selection.get_selected_rows()
if len(row_list) == 0:
self.statbar.push(1, ("Please select a layer to customize."))
return
elif len(row_list) > 1:
self.statbar.push(1, ("Please select only one layer to customize."))
return
row = row_list[0]
lyr_obj = self.layer_store[row][3]
layer_prop = LayerProperties(lyr_obj, self.redraw_plot, self.main_window)
if testing == False:
layer_prop.run()
return layer_prop
    def layer_selection_changed(self, selection):
        """
        Shows the data of the newly selected layer in the data-view.

        When the selection in the layer-view is changed to a layer containing
        data, then the data is displayed in the data-view. If more than one
        row is selected (or none) the data view is removed from the scrolled
        window. Redraws the plot when highlighting of selections is on.
        """
        model, row_list = selection.get_selected_rows()
        #If one row is selected show the data view, else don't show it
        if len(row_list) == 1:
            row = row_list[0]
            lyr_obj = model[row][3]
            child = self.sw_data.get_child()
            if lyr_obj is None:
                #Group layers (folders) have no data treeview.
                #If it has a child remove it
                if child is not None:
                    self.sw_data.remove(child)
                #Else: not a group layer
            else:
                #Get the treeview
                treeview_object = lyr_obj.get_data_treeview()
                #If there is a child remove it
                if child is not None:
                    self.sw_data.remove(child)
                #Add new treeview
                self.sw_data.add(treeview_object)
                #Clear any stale row selection in the data view.
                data_treeview = lyr_obj.get_data_treeview()
                data_selection = data_treeview.get_selection()
                data_selection.unselect_all()
                self.main_window.show_all()
        else:
            child = self.sw_data.get_child()
            #If there is a child remove it
            if child is not None:
                self.sw_data.remove(child)
            #Add new treeview
            self.main_window.show_all()
        #Redraw so selection highlighting follows the new selection.
        if self.settings.get_highlight() is True:
            self.redraw_plot()
def on_layer_toggled(self, widget, path):
# pylint: disable=unused-argument
"""
Toggles the layer and redraws the plot.
If the layer is toggled the bool field is switched between
True (visible) and False (invisible). Then the plot is redrawn.
"""
self.layer_store[path][0] = not self.layer_store[path][0]
self.redraw_plot()
    def create_layer(self, lyr_type):
        """
        Creates a layer according to the passed layer type.

        Depending on the layer-type a different ListStore, view and layer
        object is created. For folders all of them are None. Returns the new
        layer object, the store and the view.

        NOTE(review): an unknown lyr_type falls through all branches and
        raises UnboundLocalError on return — callers must pass a valid type.
        """
        if lyr_type == "plane":
            #Columns: dip direction, dip, stratigraphic orientation
            store = Gtk.ListStore(float, float, str)
            view = PlaneDataView(store, self.redraw_plot, self.add_feature,
                                 self.settings)
            lyr_obj_new = PlaneLayer(store, view)
        elif lyr_type == "faultplane":
            #Columns: plane dip direction/dip, linear dip direction/dip, sense
            store = Gtk.ListStore(float, float, float, float, str)
            view = FaultPlaneDataView(store, self.redraw_plot, self.add_feature,
                                      self.settings)
            lyr_obj_new = FaultPlaneLayer(store, view)
        elif lyr_type == "line":
            store = Gtk.ListStore(float, float, str)
            view = LineDataView(store, self.redraw_plot, self.add_feature,
                                self.settings)
            lyr_obj_new = LineLayer(store, view)
        elif lyr_type == "smallcircle":
            #Columns: dip direction, dip, opening angle
            store = Gtk.ListStore(float, float, float)
            view = SmallCircleDataView(store, self.redraw_plot, self.add_feature,
                                       self.settings)
            lyr_obj_new = SmallCircleLayer(store, view)
        elif lyr_type == "eigenvector":
            #Columns: dip direction, dip, eigenvalue
            store = Gtk.ListStore(float, float, float)
            view = EigenVectorView(store, self.redraw_plot, self.add_feature,
                                   self.settings)
            lyr_obj_new = EigenVectorLayer(store, view)
        elif lyr_type == "folder":
            store = None
            view = None
            lyr_obj_new = None
        return lyr_obj_new, store, view
    def add_layer_dataset(self, layer_type):
        """
        Creates a new layer of the passed type and adds it to the layer view.

        Is called by the different "new layer" toolbuttons. If the number of
        selected rows is 0 or more than one, the layer is appended at the end.
        If just one row is selected, and the row is a group, then the new
        layer is created in that group. Otherwise it is added at the end of
        the same level as the selection. Returns the new data store and the
        new layer object.
        """
        store = None
        lyr_obj_new = None
        def add_layer(itr, layer_type):
            #Creates the layer and appends it under the passed parent iter
            #(None appends at the top level).
            lyr_obj_new, store, view = self.create_layer(layer_type)
            view.set_layer_object(lyr_obj_new)
            pixbuf = lyr_obj_new.get_pixbuf()
            self.layer_store.append(itr,
                [True, pixbuf, lyr_obj_new.get_label(), lyr_obj_new])
            return store, lyr_obj_new
        selection = self.layer_view.get_selection()
        model, row_list = selection.get_selected_rows()
        rows = len(row_list)
        if rows == 0 or rows > 1:
            store, lyr_obj_new = add_layer(None, layer_type)
        else:
            #If selected item is group, add to group, else: add to level
            row = row_list[0]
            lyr_obj = model[row][3]
            selection_itr = model.get_iter(row_list[0])
            if lyr_obj is None:
                store, lyr_obj_new = add_layer(selection_itr, layer_type)
                self.layer_view.expand_row(row, True)
            else:
                parent_itr = model.iter_parent(selection_itr)
                store, lyr_obj_new = add_layer(parent_itr, layer_type)
        return store, lyr_obj_new
def on_toolbutton_create_plane_dataset_clicked(self, widget):
# pylint: disable=unused-argument
"""
When the toolbutton "toolbutton_create_dataset" is pressed this function
creates a new dataset in the currently active layer group.
Each dataset has a corresponding data sheet.
"""
store, lyr_obj_new = self.add_layer_dataset("plane")
return store, lyr_obj_new
def on_toolbutton_create_faultplane_dataset_clicked(self, widget):
# pylint: disable=unused-argument
"""
When the toolbutton "toolbutton_create_dataset" is pressed this function
creates a new dataset in the currently active layer group.
Each dataset has a corresponding data sheet.
"""
store, lyr_obj_new = self.add_layer_dataset("faultplane")
return store, lyr_obj_new
def on_toolbutton_create_line_dataset_clicked(self, widget):
# pylint: disable=unused-argument
"""
Creates a new line data layer.
"""
store, lyr_obj_new = self.add_layer_dataset("line")
return store, lyr_obj_new
def on_toolbutton_create_small_circle_clicked(self, widget):
# pylint: disable=unused-argument
"""
Creates a new small circle layer.
"""
store, lyr_obj_new = self.add_layer_dataset("smallcircle")
return store, lyr_obj_new
def parse_planes(self, treestore, subset=None):
"""
Parses planes and returns a list of strikes, dipdirs and dips.
Parsing converts from dip direction to strikes.
"""
strike = []
dipdir = []
dip = []
for key, row in enumerate(treestore):
if subset is not None and key not in subset:
continue
strike.append(float(row[0]) - 90)
dipdir.append(float(row[0]))
dip.append(float(row[1]))
return strike, dipdir, dip
    def parse_faultplanes(self, treestore, subset=None):
        """
        Parses a faultplane treestore and returns one list per drawn column.

        Converts planes from dip-direction to strikes so they can be plotted.
        Besides the raw columns it also derives, per row: the
        sense-corrected linear (for arrow plotting) and the great circle
        that connects the lineation with the pole of the faultplane
        (lp_plane, used for Hoeppener-Plots). When a subset of row indices
        is passed, only those rows are parsed.
        """
        strike = []
        plane_dir = []
        plane_dip = []
        line_dir = []
        line_dip = []
        sense = []
        line_sense_dir = []
        line_sense_dip = []
        lp_plane_dir = []
        lp_plane_dip = []
        for key, row in enumerate(treestore):
            if subset is not None and key not in subset:
                continue
            strike.append(float(row[0] - 90))
            plane_dir.append(float(row[0]))
            plane_dip.append(float(row[1]))
            line_dir.append(float(row[2]))
            line_dip.append(float(row[3]))
            sense.append(row[4])
            #Overthrusts ("up") point away from the linear; flip it.
            if row[4] == "up":
                line_sense_dir.append(float(row[2]) + 180)
                line_sense_dip.append(90 - float(row[3]))
            elif row[4] == "dn":
                line_sense_dir.append(float(row[2]))
                line_sense_dip.append(float(row[3]))
            #Fit the great circle through the lineation and the plane pole.
            fit_strike, fit_dip = mplstereonet.fit_girdle(
                        [float(row[3]), 90 - float(row[1])],
                        [float(row[2]), float(row[0]) + 180],
                        measurement="lines")
            lp_plane_dir.append(fit_strike)
            lp_plane_dip.append(fit_dip)
        return strike, plane_dir, plane_dip, line_dir, line_dip, sense, \
               line_sense_dir, line_sense_dip, lp_plane_dir, lp_plane_dip
def parse_lines(self, treestore, subset=None):
"""
Parses linear data with the 3 columns dip direction, dip and sense.
Returns a python-list for each column.
"""
line_dir = []
line_dip = []
sense = []
for key, row in enumerate(treestore):
if subset is not None and key not in subset:
continue
line_dir.append(float(row[0]))
line_dip.append(float(row[1]))
sense.append(row[2])
return line_dir, line_dip, sense
def parse_eigenvectors(self, treestore, subset=None):
"""
Parses a eigenvector layer and returns a list of each column
This method expect a TreeStore that stores the data of a layer. It
iterates over the rows and adds each column to a list. It returns 3
lists for line_dir, line_dip (the eigenvector) and values (the
eigenvalue)
"""
line_dir = []
line_dip = []
values = []
for key, row in enumerate(treestore):
if subset is not None and key not in subset:
continue
line_dir.append(float(row[0]))
line_dip.append(float(row[1]))
values.append(float(row[2]))
return line_dir, line_dip, values
def parse_smallcircles(self, treestore, subset=None):
"""
Parses small circle data. Data has 3 columns: Dip direction, dip and
opening angle.
"""
line_dir = []
line_dip = []
angle = []
for key, row in enumerate(treestore):
if subset is not None and key not in subset:
continue
line_dir.append(float(row[0]))
line_dip.append(float(row[1]))
angle.append(float(row[2]))
return line_dir, line_dip, angle
    def draw_plane(self, lyr_obj, dipdir, dip, highlight=False):
        """
        Draws a great circle in the stereonet using the layer's formatting.

        The legend label carries the layer label and the number of
        measurements. Highlighted planes are drawn thicker and without a
        label so the legend is not duplicated.
        """
        num_data = len(dipdir)
        lbl = "{} ({})".format(lyr_obj.get_label(), num_data)
        if highlight is False:
            self.ax_stereo.plane(dipdir, dip, color=lyr_obj.get_line_color(),
                        label=lbl,
                        linewidth=lyr_obj.get_line_width(),
                        linestyle=lyr_obj.get_line_style(),
                        dash_capstyle=lyr_obj.get_capstyle(),
                        alpha=lyr_obj.get_line_alpha(), clip_on=False)
        else:
            self.ax_stereo.plane(dipdir, dip, color=lyr_obj.get_line_color(),
                        linewidth=lyr_obj.get_line_width() + 2,
                        linestyle=lyr_obj.get_line_style(),
                        dash_capstyle=lyr_obj.get_capstyle(),
                        alpha=lyr_obj.get_line_alpha(), clip_on=False)
    def draw_line(self, lyr_obj, dipdir, dip, highlight=False):
        """
        Draws a linear element in the stereonet using the layer's formatting.

        The legend label carries the layer label and the number of
        measurements. Highlighted linears get a wider marker edge and no
        label so the legend is not duplicated.
        """
        num_data = len(dipdir)
        lbl = "{} ({})".format(lyr_obj.get_label(), num_data)
        if highlight is False:
            #ax.line takes dip first and then dipdir (as strike)
            self.ax_stereo.line(dip, dipdir, marker=lyr_obj.get_marker_style(),
                        markersize=lyr_obj.get_marker_size(),
                        color=lyr_obj.get_marker_fill(),
                        label=lbl,
                        markeredgewidth=lyr_obj.get_marker_edge_width(),
                        markeredgecolor=lyr_obj.get_marker_edge_color(),
                        alpha=lyr_obj.get_marker_alpha(), clip_on=False)
        else:
            self.ax_stereo.line(dip, dipdir, marker=lyr_obj.get_marker_style(),
                        markersize=lyr_obj.get_marker_size(),
                        color=lyr_obj.get_marker_fill(),
                        markeredgewidth=lyr_obj.get_marker_edge_width() + 2,
                        markeredgecolor=lyr_obj.get_marker_edge_color(),
                        alpha=lyr_obj.get_marker_alpha(), clip_on=False)
    def draw_eigenvector(self, lyr_obj, dipdir, dip, values, highlight=False):
        """
        Draws the eigenvectors as lines and adds the eigenvalues to the legend.

        This method is called from the redraw_plot method to draw an
        eigenvector layer. It expects a layer object and arrays for
        dip-direction, dips and values. The arrays are rounded and converted
        to strings for the legend (zero-padded to a fixed width so the
        legend columns line up). Highlighted vectors are drawn larger and
        without a label.
        """
        dipdir = np.round(dipdir, 1).tolist()
        dip = np.round(dip, 1).tolist()
        values = np.round(values, 2).tolist()
        dipdir_str = []
        dip_str = []
        values_str = []
        for x in dipdir:
            dipdir_str.append(str(x).rjust(5, "0"))
        for y in dip:
            dip_str.append(str(y).rjust(4, "0"))
        for v in values:
            values_str.append(str(v))
        #One legend line per measurement: "dipdir/dip, eigenvalue".
        lbl = "{} \n".format(lyr_obj.get_label())
        for key, value in enumerate(dipdir):
            lbl += "  {}/{}, {}\n".format(dipdir_str[key], dip_str[key],
                                          values_str[key])
        if highlight is False:
            #ax.line takes dip first and then dipdir (as strike)
            self.ax_stereo.line(dip, dipdir, marker=lyr_obj.get_marker_style(),
                        markersize=lyr_obj.get_marker_size(),
                        color=lyr_obj.get_marker_fill(),
                        label=lbl,
                        markeredgewidth=lyr_obj.get_marker_edge_width(),
                        markeredgecolor=lyr_obj.get_marker_edge_color(),
                        alpha=lyr_obj.get_marker_alpha(), clip_on=False)
        else:
            self.ax_stereo.line(dip, dipdir, marker=lyr_obj.get_marker_style(),
                        markersize=lyr_obj.get_marker_size() + 2,
                        color=lyr_obj.get_marker_fill(),
                        markeredgewidth=lyr_obj.get_marker_edge_width(),
                        markeredgecolor=lyr_obj.get_marker_edge_color(),
                        alpha=lyr_obj.get_marker_alpha(), clip_on=False)
    def draw_smallcircles(self, lyr_obj, dipdir, dip, angle, highlight=False):
        """
        Draws small circles in the stereonet using the layer's formatting.

        Returns a proxy Line2D handler and a label so the caller can build
        a legend entry (ax.cone itself does not produce a usable legend
        handle). Highlighted circles are drawn thicker.
        """
        if highlight is False:
            #ax.cone takes dip first and then dipdir!
            #facecolor needs to be "None" because there is a bug with which side to fill
            self.ax_stereo.cone(dip, dipdir, angle, facecolor="None",
                        edgecolor=lyr_obj.get_line_color(),
                        linewidth=lyr_obj.get_line_width(),
                        label=lyr_obj.get_label(),
                        linestyle=lyr_obj.get_line_style())
        else:
            self.ax_stereo.cone(dip, dipdir, angle, facecolor="None",
                        edgecolor=lyr_obj.get_line_color(),
                        linewidth=lyr_obj.get_line_width() + 2,
                        label=lyr_obj.get_label(),
                        linestyle=lyr_obj.get_line_style())
        num_data = len(dipdir)
        lbl = "{} ({})".format(lyr_obj.get_label(), num_data)
        #Proxy artist used as the legend handle for this layer.
        handler = Line2D([], [], color=lyr_obj.get_line_color(),
                        linewidth=lyr_obj.get_line_width(),
                        linestyle=lyr_obj.get_line_style(),
                        dash_capstyle=lyr_obj.get_capstyle(),
                        alpha=lyr_obj.get_line_alpha())
        return handler, lbl
    def draw_mean_vector(self, lyr_obj, dipdir, dip, highlight=False):
        """
        Draws the mean vector of the current linear layer.

        Does nothing for empty layers. The marker style is fixed (red
        diamond) and independent of the layer formatting.
        """
        if len(dipdir) == 0:
            return
        vector, r_value = mplstereonet.find_mean_vector(dip, dipdir)
        #vector is (dip, dip-direction); ax.line takes dip first.
        self.ax_stereo.line(vector[0], vector[1], marker="d",
                            markersize=8,
                            color="#ff0000",
                            markeredgewidth=1,
                            markeredgecolor="#000000",
                            clip_on=False)
    def draw_fisher_smallcircle(self, lyr_obj, dipdir, dip, highlight=False):
        """
        Draws the confidence small circle of the current linear layer.

        Does nothing for empty layers. The circle is centered on the Fisher
        mean vector with the confidence radius returned by mplstereonet.
        """
        if len(dipdir) == 0:
            return
        confidence = lyr_obj.get_fisher_conf()
        vector, stats = mplstereonet.find_fisher_stats(dip, dipdir, conf=confidence)
        #ax.cone takes dip first; stats[1] is the confidence radius.
        self.ax_stereo.cone(vector[0], vector[1], stats[1], facecolor="None",
                            color=lyr_obj.get_line_color(),
                            linewidth=lyr_obj.get_line_width(),
                            label=lyr_obj.get_label(),
                            linestyle=lyr_obj.get_line_style())
    def draw_poles(self, lyr_obj, dipdir, dip, highlight=False):
        """
        Draws a plane pole in the stereonet using the layer's formatting.

        The legend label carries the layer label and the number of
        measurements. Highlighted poles are drawn larger and without a
        label so the legend is not duplicated.
        """
        num_data = len(dipdir)
        lbl = "Poles of {} ({})".format(lyr_obj.get_label(), num_data)
        if highlight is False:
            self.ax_stereo.pole(dipdir, dip, marker=lyr_obj.get_pole_style(),
                        markersize=lyr_obj.get_pole_size(),
                        color=lyr_obj.get_pole_fill(),
                        label=lbl,
                        markeredgewidth=lyr_obj.get_pole_edge_width(),
                        markeredgecolor=lyr_obj.get_pole_edge_color(),
                        alpha=lyr_obj.get_pole_alpha(), clip_on=False)
        else:
            self.ax_stereo.pole(dipdir, dip, marker=lyr_obj.get_pole_style(),
                        markersize=lyr_obj.get_pole_size() + 2,
                        color=lyr_obj.get_pole_fill(),
                        markeredgewidth=lyr_obj.get_pole_edge_width(),
                        markeredgecolor=lyr_obj.get_pole_edge_color(),
                        alpha=lyr_obj.get_pole_alpha(), clip_on=False)
def draw_contours(self, lyr_obj, dipdir, dips, measure_type):
"""
MplStereonet accepts measurements as "poles" for planes and
"lines" for linear measurements.
"""
if len(dipdir) == 0:
return None
if lyr_obj.get_manual_range() == True:
lower = lyr_obj.get_lower_limit()
upper = lyr_obj.get_upper_limit()
steps = lyr_obj.get_steps()
cont_interval = np.linspace(lower, upper, num=steps)
else:
cont_interval = None
#Implement hatches = (['-', '+', 'x', '\\', '*', 'o', 'O', '.'])
if lyr_obj.get_draw_contour_fills() == True:
cbar = self.ax_stereo.density_contourf(dipdir, dips,
measurement=measure_type,
method=lyr_obj.get_contour_method(),
gridsize=lyr_obj.get_contour_resolution(),
cmap=lyr_obj.get_colormap(),
sigma=lyr_obj.get_contour_sigma(),
levels=cont_interval)
else:
cbar = None
if lyr_obj.get_draw_contour_lines() == True:
if lyr_obj.get_use_line_color() == True:
clines = self.ax_stereo.density_contour(dipdir, dips,
measurement=measure_type,
method = lyr_obj.get_contour_method(),
gridsize = lyr_obj.get_contour_resolution(),
sigma = lyr_obj.get_contour_sigma(),
colors = lyr_obj.get_contour_line_color(),
linewidths = lyr_obj.get_contour_line_width(),
linestyles = lyr_obj.get_contour_line_style(),
levels=cont_interval)
else:
clines = self.ax_stereo.density_contour(dipdir, dips,
measurement=measure_type,
method = lyr_obj.get_contour_method(),
gridsize = lyr_obj.get_contour_resolution(),
sigma = lyr_obj.get_contour_sigma(),
cmap = lyr_obj.get_colormap(),
linewidths = lyr_obj.get_contour_line_width(),
linestyles = lyr_obj.get_contour_line_style(),
levels=cont_interval)
if lyr_obj.get_draw_contour_labels() == True:
if clines is not None:
self.ax_stereo.clabel(clines,
fontsize = lyr_obj.get_contour_label_size())
self.cbar.append(cbar)
    def draw_angelier(self, values):
        """
        Draws the Angelier arrows for a fault plane layer.

        Receives the data as a list (layer object, plane orientation,
        strikes, linear orientation, pole-linear-plane orientation and
        sense). Iterates over the arrow positions and senses and draws the
        resulting arrows. Strike-slip senses ("sin"/"dex") and unknown
        senses are currently not drawn.
        """
        lyr_obj, plane_dir, plane_dip, strikes, \
                    line_dir, line_dip, lp_plane_dir, lp_plane_dip, sense = values
        lon, lat = mplstereonet.line(line_dip, line_dir)
        for x, y, sns in zip(lon, lat, sense):
            #Normalize the position vector; it doubles as arrow direction.
            mag = np.hypot(x, y)
            u, v = x / mag, y / mag
            if sns == "up":
                #Overthrust: arrow points away from the equator.
                self.ax_stereo.quiver(x, y, -u, -v, width=1.5, headwidth=4,
                                      units="dots", pivot="middle",
                                      color=lyr_obj.get_arrow_color())
            elif sns == "dn":
                #Downthrust: arrow points towards the equator.
                self.ax_stereo.quiver(x, y, u, v, width=1.5, headwidth=4,
                                      units="dots", pivot="middle",
                                      color=lyr_obj.get_arrow_color())
            elif sns == "sin":
                pass
            elif sns == "dex":
                pass
            else:
                pass
        return None
def draw_hoeppener(self, lyr_obj, plane_dir, plane_dip, line_dir,
                   line_dip, lp_plane_dir, lp_plane_dip, sense):
    """
    Receives data from a faultplane and draws a Hoeppener arrow.

    Triggered by the redraw_plot function.
    Receives a plane (direction and dip), linear (direction and dip), and
    the plane that connects them to each other (direction and dip). Finds
    the closest index to the pole on the pole-linear-plane and uses that
    index as the center of the arrow. The arrow connects the two points on
    the pole-linear-plane that lie f = 2 indexes in either direction. The
    length is corrected if it obviously crosses the stereonet (length > 1).
    Then the start and end direction is determined by the shear sense. If
    the datapoint has no shear sense no arrow is drawn. Unknown shear sense
    is just a line. The arrow direction is determined like this:
    -------------
    "up" (overthrust) Arrow should point away from equator.
    "dn" (downthrust) Arrow should point towards the equator.
    "sin" (sinistral strike-slip) Arrows should point left.
    "dex" (dextral strike-slip) Arrows should point right.
    __!!__ Still has a bug. Some orientations are wrong!
    """
    if len(line_dir) == 0:
        return

    def find_nearest_point(plane_stack, point):
        """
        Finds the closest index to the pole on the pole-linear-plane.

        The pole lies on the pole-linear plane. The index that is closest
        to this point is returned and further used as the center of the
        arrow.
        """
        tree = spatial.cKDTree(plane_stack)
        dist, index = tree.query(point)
        return index

    for k, x in enumerate(plane_dir):
        plane_lons, plane_lats = mplstereonet.plane(lp_plane_dir[k],
                                                    lp_plane_dip[k])
        line_lons, line_lats = mplstereonet.pole(plane_dir[k] - 90,
                                                 plane_dip[k])
        plane_stack = np.dstack([plane_lons.ravel(), plane_lats.ravel()])[0]
        point = np.array([line_lons, line_lats]).transpose()
        i = find_nearest_point(plane_stack, point)
        #This solution works well for short arrows. Longer arrows bypass
        #the pole point. If length of arrow is a concern this needs to be
        #redone in a different way.
        f = 2
        lon_start = plane_lons[i-f][0][0]
        lat_start = plane_lats[i-f][0][0]
        lon_end = plane_lons[i+f][0][0]
        lat_end = plane_lats[i+f][0][0]
        dlon = lon_end - lon_start
        dlat = lat_end - lat_start
        #If the arrow crosses the stereonet, the arrow is moved so long
        #until the closer point touches the edge of the stereonet
        c = 0
        while dlon > 1 or dlat > 1:
            c = c + 1
            # BUGFIX: the second comparison previously read
            # "abs(lat_start) > (lat_end)" - the missing abs() made the
            # shift direction wrong whenever lat_end was negative.
            if abs(lon_start) > abs(lon_end) or abs(lat_start) > abs(lat_end):
                lon_start = plane_lons[i-f+c][0][0]
                lat_start = plane_lats[i-f+c][0][0]
                lon_end = plane_lons[i+f+c][0][0]
                lat_end = plane_lats[i+f+c][0][0]
            else:
                lon_start = plane_lons[i-f-c][0][0]
                lat_start = plane_lats[i-f-c][0][0]
                lon_end = plane_lons[i+f-c][0][0]
                lat_end = plane_lats[i+f-c][0][0]
            dlon = lon_end - lon_start
            dlat = lat_end - lat_start
        #Correct the direction of the arrow based on the shear sense.
        if sense[k] == "up":
            if abs(lon_start) > abs(lon_end):
                lon_start, lon_end = lon_end, lon_start
                lat_start, lat_end = lat_end, lat_start
        elif sense[k] == "dn":
            if abs(lon_start) < abs(lon_end):
                lon_start, lon_end = lon_end, lon_start
                lat_start, lat_end = lat_end, lat_start
        elif sense[k] == "sin":
            if lon_start > lon_end:
                lon_start, lon_end = lon_end, lon_start
                lat_start, lat_end = lat_end, lat_start
        elif sense[k] == "dex":
            if lon_start < lon_end:
                lon_start, lon_end = lon_end, lon_start
                lat_start, lat_end = lat_end, lat_start
        #Draw line for "uk", nothing for "" and arrow for everything else
        #__!!__ The arrows direction might not be determined by
        #xy = start and xytext = end
        if sense[k] == "uk":
            self.ax_stereo.annotate("", xy = (lon_end, lat_end),
                                    xytext = (lon_start, lat_start),
                                    xycoords = "data",
                                    textcoords = "data",
                                    arrowprops = dict(arrowstyle = "-",
                                            connectionstyle = "arc3"))
        elif sense[k] == "":
            pass
        else:
            self.ax_stereo.annotate("", xy = (lon_end, lat_end),
                                    xytext = (lon_start, lat_start),
                                    xycoords = "data",
                                    textcoords = "data",
                                    arrowprops = dict(arrowstyle = "->",
                                            connectionstyle = "arc3"))
def plot_layer(self, lyr_obj, subset=None, highlight=False):
    """
    Plots a certain layer or subset of a layer.

    The method expects a layer-object which should be plotted. If only a
    subset should be plotted, a list containing the row numbers of the
    subset has to be passed additionally. If the layer or subset should be
    highlighted the method additionally expects a boolean keyword argument:
    highlight = True. Each layer and subset is parsed and then passed to
    the respective drawing functions.
    """
    if lyr_obj is None:
        lyr_type = "group"
    else:
        lyr_type = lyr_obj.get_layer_type()
        store = lyr_obj.get_data_treestore()
    if lyr_type == "plane":
        strike, dipdir, dip = self.parse_planes(store, subset)
        if lyr_obj.get_draw_gcircles() == True:
            self.draw_plane(lyr_obj, strike, dip, highlight=highlight)
        if lyr_obj.get_draw_poles() == True:
            self.draw_poles(lyr_obj, strike, dip, highlight=highlight)
        self.draw_contours(lyr_obj, strike, dip, "poles")
        if self.ax_rose is not None:
            # BUGFIX: "/" is true division in Python 3 and yields a
            # float; np.histogram requires an integer bin count.
            num_bins = int(360 / lyr_obj.get_rose_spacing())
            bin_width = 2 * np.pi / num_bins
            dipdir = np.radians(dipdir)
            values, bin_edges = np.histogram(dipdir, num_bins,
                                             range = (0, 2 * np.pi))
            # NOTE(review): "left" was renamed to "x" in Matplotlib 3;
            # confirm the pinned Matplotlib version still accepts it.
            self.ax_rose.bar(left = bin_edges[:-1], height = values,
                             width = bin_width, alpha=0.5,
                             color = lyr_obj.get_line_color(),
                             edgecolor = lyr_obj.get_pole_edge_color(),
                             bottom = lyr_obj.get_rose_bottom())
        if self.ax_drose is not None:
            num_bins = int(90 / lyr_obj.get_dip_rose_spacing())
            bin_width = (np.pi / 2) / num_bins
            dip = np.radians(dip)
            values, bin_edges = np.histogram(dip, num_bins,
                                             range = (0, np.pi / 2))
            self.ax_drose.bar(left = bin_edges[:-1], height = values,
                              width = bin_width, alpha=0.5,
                              color = lyr_obj.get_line_color(),
                              edgecolor = lyr_obj.get_pole_edge_color(),
                              bottom = lyr_obj.get_rose_bottom())
    elif lyr_type == "line":
        dipdir, dip, sense = self.parse_lines(store, subset)
        if lyr_obj.get_draw_linears() == True:
            self.draw_line(lyr_obj, dipdir, dip, highlight=highlight)
        self.draw_contours(lyr_obj, dip, dipdir, "lines")
        if self.ax_rose is not None:
            num_bins = int(360 / lyr_obj.get_rose_spacing())
            bin_width = 2 * np.pi / num_bins
            dipdir = np.radians(dipdir)
            values, bin_edges = np.histogram(dipdir, num_bins,
                                             range = (0, 2 * np.pi))
            self.ax_rose.bar(left = bin_edges[:-1], height = values,
                             width = bin_width, alpha=0.5,
                             color = lyr_obj.get_marker_fill(),
                             edgecolor = lyr_obj.get_marker_edge_color(),
                             bottom = lyr_obj.get_rose_bottom())
        if self.ax_drose is not None:
            num_bins = int(90 / lyr_obj.get_dip_rose_spacing())
            bin_width = (np.pi / 2) / num_bins
            dip = np.radians(dip)
            values, bin_edges = np.histogram(dip, num_bins,
                                             range = (0, np.pi / 2))
            self.ax_drose.bar(left = bin_edges[:-1], height = values,
                              width = bin_width, alpha=0.5,
                              color = lyr_obj.get_marker_fill(),
                              edgecolor = lyr_obj.get_marker_edge_color(),
                              bottom = lyr_obj.get_rose_bottom())
        if lyr_obj.get_draw_mean_vector() == True:
            self.draw_mean_vector(lyr_obj, dipdir, dip)
        if lyr_obj.get_draw_fisher_sc() == True:
            self.draw_fisher_smallcircle(lyr_obj, dipdir, dip)
    elif lyr_type == "faultplane":
        strike, plane_dir, plane_dip, line_dir, line_dip, \
            sense, line_sense_dir, line_sense_dip, \
            lp_plane_dir, lp_plane_dip = (
                self.parse_faultplanes(store, subset))
        if lyr_obj.get_draw_gcircles() == True:
            self.draw_plane(lyr_obj, strike, plane_dip, highlight=highlight)
        if lyr_obj.get_draw_poles() == True:
            self.draw_poles(lyr_obj, strike, plane_dip, highlight=highlight)
        if lyr_obj.get_draw_linears() == True:
            self.draw_line(lyr_obj, line_dir, line_dip, highlight=highlight)
        if lyr_obj.get_draw_lp_plane() == True:
            # NOTE(review): "highlight" is forwarded to mplstereonet's
            # plane() - confirm it is an accepted keyword there.
            self.ax_stereo.plane(lp_plane_dir, lp_plane_dip,
                                 linestyle="dotted",
                                 color="#000000", highlight=highlight)
        if lyr_obj.get_draw_hoeppener() == True:
            self.draw_hoeppener(lyr_obj, plane_dir, plane_dip,
                                line_dir, line_dip, lp_plane_dir,
                                lp_plane_dip, sense)
        if lyr_obj.get_draw_angelier() == True:
            self.draw_angelier([lyr_obj, plane_dir, plane_dip, strike,
                                line_dir, line_dip, lp_plane_dir,
                                lp_plane_dip, sense])
    elif lyr_type == "smallcircle":
        dipdir, dip, angle = self.parse_smallcircles(store, subset)
        handler, label = self.draw_smallcircles(lyr_obj, dipdir,
                                                dip, angle,
                                                highlight=highlight)
        self.sc_labels.append(label)
        self.sc_handlers.append(handler)
    elif lyr_type == "eigenvector":
        dipdir, dip, values = self.parse_lines(store, subset)
        if lyr_obj.get_draw_linears() == True:
            self.draw_eigenvector(lyr_obj, dipdir, dip, values,
                                  highlight=highlight)
        self.draw_contours(lyr_obj, dip, dipdir, "lines")
def highlight_selection(self, deselected):
    """
    Gets the current selection and highlights it in the plot.

    If only a layer is selected the layer is passed to the redrawing
    function. If one or more data-rows are selected the row-numbers are
    collected in a subset list and passed with the layer to the redrawing
    function.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()

    def highlight_layers(deselected):
        # Re-plot every selected layer unless it lives under a
        # deselected branch of the tree.
        for row in row_list:
            lyr_obj = model[row][3]
            if lyr_obj is None:
                continue
            path = str(row)
            if any(path.startswith(d) for d in deselected):
                continue
            self.plot_layer(lyr_obj, highlight=True)

    def highlight_rows(lyr_obj, data_row_list):
        subset = [r.get_indices()[0] for r in data_row_list]
        self.plot_layer(lyr_obj, subset, highlight=True)

    if not row_list:
        return
    if len(row_list) == 1:
        lyr_obj = model[row_list[0]][3]
        if lyr_obj is None:
            return
        data_selection = lyr_obj.get_data_treeview().get_selection()
        data_model, data_row_list = data_selection.get_selected_rows()
        if len(data_row_list) > 0:
            highlight_rows(lyr_obj, data_row_list)
        else:
            highlight_layers(deselected)
    else:
        highlight_layers(deselected)
def redraw_plot(self, checkout_canvas=False):
    """
    This function is called after any changes to the datasets or when
    adding or deleting layer. The plot is cleared and then redrawn.
    layer[3] = layer object
    """
    self.cbar = []

    def inverted_transform_stereonet():
        """
        The inverted transform of the stereonet depends on the projection.
        Equal area and equal angle projections have different
        transformation functions. The appropriate one has to be set.
        """
        self.inv = self.settings.get_inverse_transform()

    #Rebuild the axes only when the view actually changed.
    if self.view_changed == True or checkout_canvas == True:
        self.view_changed = False
        if self.view_mode == "stereonet":
            self.ax_stereo, self.ax_cbar = self.settings.get_stereonet()
            inverted_transform_stereonet()
        elif self.view_mode == "stereo-rose":
            self.ax_stereo, self.ax_rose, self.ax_cbar = self.settings.get_stereo_rose()
            inverted_transform_stereonet()
        elif self.view_mode == "stereo-two-rose":
            self.ax_stereo, self.ax_rose, self.ax_drose, self.ax_cbar = self.settings.get_stereo_two_rose()
            inverted_transform_stereonet()
        elif self.view_mode == "rose":
            self.ax_rose = self.settings.get_rose_diagram()
        elif self.view_mode == "pt":
            self.inv = self.settings.get_inverse_transform()
            self.ax_stereo, self.ax_fluc, self.ax_mohr = (
                self.settings.get_pt_view())
            inverted_transform_stereonet()

    #The hidden titles are used to identify axes in mouse-over events.
    def clear_stereo():
        self.ax_stereo.cla()
        self.ax_stereo.set_title("ax_stereo", visible=False)

    def clear_rose():
        self.ax_rose.cla()
        self.ax_rose.set_title("ax_rose", visible=False)

    def clear_drose():
        self.ax_drose.cla()
        self.ax_drose.set_title("ax_drose", visible=False)

    def clear_fluc():
        self.ax_fluc.cla()
        self.ax_fluc.set_title("ax_fluc", visible=False)

    def clear_mohr():
        self.ax_mohr.cla()
        self.ax_mohr.set_title("ax_mohr", visible=False)

    if self.view_mode == "stereonet":
        clear_stereo()
    elif self.view_mode == "stereo-rose":
        clear_stereo()
        clear_rose()
    elif self.view_mode == "stereo-two-rose":
        clear_stereo()
        clear_rose()
        clear_drose()
    elif self.view_mode == "rose":
        clear_rose()
    elif self.view_mode == "pt":
        clear_stereo()
        clear_fluc()
        clear_mohr()
    if self.settings.get_draw_grid_state() == True:
        self.ax_stereo.grid(linestyle = self.settings.get_grid_linestyle(),
                            color = self.settings.get_grid_color(),
                            linewidth = self.settings.get_grid_width())
    if self.settings.get_show_cross() == True:
        #Draw a small center cross with two short annotation lines.
        self.ax_stereo.annotate("", xy = (-0.03, 0),
                                xytext = (0.03, 0),
                                xycoords = "data",
                                arrowprops = dict(arrowstyle = "-",
                                                  connectionstyle = "arc3"))
        self.ax_stereo.annotate("", xy = (0, -0.03),
                                xytext = (0, 0.03),
                                xycoords = "data",
                                arrowprops = dict(arrowstyle = "-",
                                                  connectionstyle = "arc3"))
    if self.settings.get_show_north() == True:
        self.ax_stereo.set_azimuth_ticks([0], labels=['N'])
    self.deselected = []

    def iterate_over_rows(model, path, itr):
        #Refresh label and pixbuf of each row, collect deselected
        #branches, and plot every still-visible layer.
        lyr_obj = model[path][3]
        if lyr_obj is not None:
            layer_type = lyr_obj.get_layer_type()
            model[path][2] = lyr_obj.get_label()
            model[path][1] = lyr_obj.get_pixbuf()
        else:
            layer_type = "group"
        if model[path][0] == False:
            self.deselected.append(str(path))
            return
        draw = True
        for d in self.deselected:
            if str(path).startswith(d) == True:
                draw = False
        if draw == False:
            return
        self.plot_layer(lyr_obj)

    self.sc_labels = []
    self.sc_handlers = []
    self.layer_store.foreach(iterate_over_rows)
    #Only the first available contour colorbar is shown.
    one_cbar = False
    for cbar in self.cbar:
        if cbar is not None:
            self.ax_cbar.axis("on")
            cb = self.fig.colorbar(cbar, cax=self.ax_cbar)
            one_cbar = True
            break
    if one_cbar == False:
        self.ax_cbar.cla()
        self.ax_cbar.axis("off")
    if self.settings.get_highlight() is True:
        self.highlight_selection(self.deselected)
    if self.settings.get_draw_legend() == True:
        handles, labels = self.ax_stereo.get_legend_handles_labels()
        newLabels, newHandles = [], []
        for handle, label in zip(handles, labels):
            if label not in newLabels:
                newLabels.append(label)
                newHandles.append(handle)
        for handle, label in zip(self.sc_handlers, self.sc_labels):
            if label not in newLabels:
                newLabels.append(label)
                newHandles.append(handle)
        # BUGFIX: was "len(newHandles) is not 0" - identity comparison
        # with an int literal is wrong and a SyntaxWarning on Python 3.8+.
        if len(newHandles) != 0:
            self.ax_stereo.legend(newHandles, newLabels,
                                  bbox_to_anchor=(1.5, 1.1), borderpad=1,
                                  numpoints=1)
    self.canvas.draw()
def on_toolbutton_create_group_layer_clicked(self, widget):
    """
    When the toolbutton "toolbutton_create_layer" is pressed this function
    calls the "add_layer"-function of the TreeStore. The called function
    creates a new layer-group at the end of the view.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    same_depth = True

    def check_same_depth(rows):
        #All entries equal <=> the list shifted by one equals itself.
        return rows[1:] == rows[:-1]

    #If no row is selected then the group is added to the end of the view
    if len(row_list) == 0:
        model.append(None,
            [True, self.settings.get_folder_icon(), _("Group Layer"), None])
    else:
        #Grouping only works for rows that share the same tree depth.
        depth_list = []
        for row in row_list:
            itr = model.get_iter(row)
            depth_list.append(self.layer_store.iter_depth(itr))
        if check_same_depth(depth_list) == False:
            same_depth = False
            print("Selection is not on the same depth")
            selection.unselect_all()
            return

    def move_rows(parent_itr, itr):
        """
        Adds each row to the parent iter. First call is new group and
        first row that was selected.
        Checks if it has children. If yes, it starts a recursive loop.
        """
        #ov = old values
        ov = model[itr]
        new_itr = model.append(parent_itr, [ov[0], ov[1], ov[2], ov[3]])
        children_left = model.iter_has_child(itr)
        while children_left == True:
            child = model.iter_children(itr)
            move_rows(new_itr, child)
            model.remove(child)
            children_left = model.iter_has_child(itr)

    if same_depth == True and len(row_list) > 0:
        selection_itr = model.get_iter(row_list[0])
        parent_itr = model.iter_parent(selection_itr)
        new_group_itr = model.insert_before(parent_itr, selection_itr,
                        [True, self.settings.get_folder_icon(),
                        _("Group Layer"), None])
        selection = self.layer_view.get_selection()
        model, row_list = selection.get_selected_rows()
        for row in reversed(row_list):
            k = model[row]
            itr = model.get_iter(row)
            move_rows(new_group_itr, itr)
            model.remove(itr)
        new_path = model.get_path(new_group_itr)
        self.layer_view.expand_row(new_path, True)
def layer_name_edited(self, widget, path, new_label):
    """
    Stores an edited layer name and refreshes the plot.

    Writes the new label into the TreeStore row at the given path and,
    if the row holds a layer object, also into that object.
    """
    row = self.layer_store[path]
    row[2] = new_label
    lyr_obj = row[3]
    if lyr_obj is not None:
        lyr_obj.set_label(new_label)
    self.redraw_plot()
def on_menuitem_about_activate(self, widget):
    """
    Shows the "About" dialog.

    Triggered from the menu bar. Builds an AboutDialog bound to the main
    window and displays it.
    """
    dialog = AboutDialog(self.main_window)
    dialog.run()
def on_menuitem_quit_activate(self, widget):
    """
    Ends the application.

    Triggered when the main window is closed from the menu. Terminates
    the Gtk main loop.
    """
    Gtk.main_quit()
def on_main_window_destroy(self, widget):
    """
    Ends the application.

    Triggered when the main window is closed with the x-Button.
    Terminates the Gtk main loop.
    """
    Gtk.main_quit()
def on_toolbutton_remove_feature_clicked(self, widget):
    """
    Triggered when the toolbutton "remove feature" is clicked. Removes all
    the selected data rows from the currently active layer.

    Only acts when exactly one layer is selected; otherwise nothing
    happens. Rows are removed in reverse order so earlier removals do not
    invalidate the remaining paths.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    if len(row_list) == 1:
        lyr_obj = model[row_list[0]][3]
        data_treeview = lyr_obj.get_data_treeview()
        data_treestore = lyr_obj.get_data_treestore()
        data_selection = data_treeview.get_selection()
        data_model, data_row_list = data_selection.get_selected_rows()
        #Removed the unused "treeiter_list" accumulator; iterate in
        #reverse so paths stay valid while rows are deleted.
        for p in reversed(data_row_list):
            itr = data_model.get_iter(p)
            data_treestore.remove(itr)
    self.redraw_plot()
def convert_xy_to_dirdip(self, event):
    """
    Converts xy-coordinates of a matplotlib-event into dip-direction/dip
    by using the inverse transformation of mplstereonet. Returns floats in
    degree.
    """
    #Azimuth of the click position, wrapped into [0, 360).
    alpha = np.arctan2(event.xdata, event.ydata)
    alpha_deg = np.degrees(alpha)
    if alpha_deg < 0:
        alpha_deg += 360
    xy = np.array([[event.xdata, event.ydata]])
    xy_trans = self.inv.transform(xy)
    # BUGFIX: float() on size-1 array slices (xy_trans[0, 0:1]) is
    # deprecated in NumPy >= 1.25; index the scalar elements directly.
    x = float(xy_trans[0, 0])
    y = float(xy_trans[0, 1])
    #Rotate back by the azimuth to read off the plunge.
    array = mplstereonet.stereonet_math._rotate(np.degrees(x),
                np.degrees(y), (-1)*alpha_deg)
    gamma = float(array[1])
    gamma_deg = 90 - np.degrees(gamma)
    return alpha_deg, gamma_deg
def add_planar_feature(self, datastore, dip_direct=0, dip=0, sense=""):
"""
Adds a planar feature row. Defaults to an empty row unless a dip
direction and dip are given.
"""
while dip_direct > 360:
dip_direct = dip_direct - 360
while dip_direct < 0:
dip_direct = dip_direct + 360
while dip > 90:
dip = dip - 90
while dip < 0:
dip = dip + 90
itr = datastore.append([dip_direct, dip, sense])
return itr
def add_linear_feature(self, datastore, dip_direct=0, dip=0, sense=""):
"""
Adds a linear feature row. Defaults to an empty row unless a dip
direction and dip are given.
"""
while dip_direct > 360:
dip_direct = dip_direct - 360
while dip_direct < 0:
dip_direct = dip_direct + 360
while dip > 90:
dip = dip - 90
while dip < 0:
dip = dip + 90
itr = datastore.append([dip_direct, dip, sense])
return itr
def add_eigenvector_feature(self, datastore, dip_direct=0, dip=0, value=0):
    """
    Appends an eigenvector feature row to the passed datastore.

    Wraps dip direction into 0-360 and dip into 0-90, then appends a row
    of [dip-direction, dip, eigenvalue] and returns the new tree iterator.
    """
    while dip_direct > 360:
        dip_direct -= 360
    while dip_direct < 0:
        dip_direct += 360
    while dip > 90:
        dip -= 90
    while dip < 0:
        dip += 90
    return datastore.append([dip_direct, dip, value])
def add_faultplane_feature(self, datastore, dip_direct = 0, dip = 0,
                           ldip_direct = 0, ldip = 0, sense = ""):
    """
    Appends a faultplane feature row to the passed datastore.

    Stores plane orientation, the associated linear and the shear sense
    as one row and returns the new tree iterator. Unlike the plane/line
    adders, no range wrapping is applied here.
    """
    return datastore.append([dip_direct, dip, ldip_direct, ldip, sense])
def add_smallcircle_feature(self, datastore, dip_direct=0, dip=0,
                            angle=10):
    """
    Appends a small circle feature row to the passed datastore.

    Defaults to an empty row (with a 10 degree opening angle) unless a
    dip direction and dip are given. Returns the new tree iterator.
    """
    return datastore.append([dip_direct, dip, angle])
def add_feature(self, layer_type, store, *args):
    """
    Adds a feature to a layer.

    Expects a layer-type and a datastore. Additional arguments are passed
    to the specific add method (e.g. dipdirection or dip). Returns the
    tree iterator of the newly created row, or None for an unknown
    layer type (previously the iterator was computed but discarded).
    """
    #Dispatch table instead of repeated independent if-statements.
    adders = {
        "plane": self.add_planar_feature,
        "line": self.add_linear_feature,
        "faultplane": self.add_faultplane_feature,
        "smallcircle": self.add_smallcircle_feature,
        "eigenvector": self.add_eigenvector_feature,
    }
    adder = adders.get(layer_type)
    if adder is None:
        return None
    return adder(store, *args)
def on_toolbutton_add_feature_clicked(self, widget):
    """
    Adds an empty row to the currently selected data layer.

    The newly created row is selected for easier editing. Does nothing
    unless exactly one layer is selected.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    if len(row_list) != 1:
        return
    lyr_obj = model[row_list[0]][3]
    data_treestore = lyr_obj.get_data_treestore()
    if data_treestore is None:
        return
    layer_type = lyr_obj.get_layer_type()
    if layer_type == "plane":
        itr = self.add_planar_feature(data_treestore)
    elif layer_type == "line":
        itr = self.add_linear_feature(data_treestore)
    elif layer_type == "faultplane":
        itr = self.add_faultplane_feature(data_treestore)
    elif layer_type == "smallcircle":
        itr = self.add_smallcircle_feature(data_treestore)
    elif layer_type == "eigenvector":
        itr = self.add_eigenvector_feature(data_treestore)
    #Select only the freshly appended row.
    data_selection = lyr_obj.get_data_treeview().get_selection()
    data_selection.unselect_all()
    data_selection.select_iter(itr)
def mpl_canvas_clicked(self, event):
    """
    If the edit mode is off, clicking anywhere on the mpl canvas should
    deselect the layer treeview.
    If the edit mode is on the layer should stay selected and each
    click should draw a feature.
    """
    selection = self.layer_view.get_selection()
    if event.inaxes is not None:
        #Click landed inside an axes: with edit mode off it only clears
        #the selection.
        if self.draw_features == False:
            selection.unselect_all()
            return
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    if len(row_list) == 1:
        if event.inaxes is not None:
            #Convert the click position into dip-direction/dip.
            alpha_deg, gamma_deg = self.convert_xy_to_dirdip(event)
        else:
            selection.unselect_all()
            return
        layer = row_list[0]
        lyr_obj = model[layer][3]
        if lyr_obj == None:
            #Layer is a layer-group
            return
        data_treestore = lyr_obj.get_data_treestore()
        if data_treestore is not None:
            #Append a feature of the matching type at the clicked
            #orientation, then redraw.
            layer_type = lyr_obj.get_layer_type()
            if layer_type == "plane":
                self.add_planar_feature(data_treestore, alpha_deg,
                                        gamma_deg)
            if layer_type == "line":
                self.add_linear_feature(data_treestore, alpha_deg,
                                        gamma_deg)
            if layer_type == "faultplane":
                self.add_faultplane_feature(data_treestore, alpha_deg,
                                            gamma_deg)
            if layer_type == "smallcircle":
                self.add_smallcircle_feature(data_treestore, alpha_deg,
                                             gamma_deg)
            self.redraw_plot()
    else:
        #No single layer selected: clicking with edit mode off clears
        #the selection.
        if self.draw_features == False:
            selection.unselect_all()
def mpl_motion_event(self, mpl_event):
    """
    Forwards motion events on the mpl canvas to the StatusBar update.
    """
    self.update_statusbar(mpl_event)
def eventbox_motion(self, widget, event):
    """
    Catches motion events and calls the updating of the StatusBar.

    Triggered by many areas of the UI. Receives a widget (EventBox,
    ScrolledWindow, Toolbar) and the event itself (Gdk.EventMotion,
    Gdk.EventCrossing, ...). Delegates to update_statusbar to push
    helpful messages to the StatusBar.
    """
    self.update_statusbar()
def update_statusbar(self, mpl_event=None, *args, **kwargs):
    """
    When the mouse cursor hovers inside the plot, the position of the
    mpl_event is pushed to the statusbar at the bottom of the GUI. Also
    called by a few buttons, to push messages to the statusbar.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()

    #Small helpers that each push one fixed instruction message.
    def push_drawing_message():
        self.statbar.push(1, "Left click inside the plot to draw a feature.")

    def push_select_layer_message():
        self.statbar.push(1, "Select the layer that you want to edit.")

    def push_one_layer_message():
        self.statbar.push(1, "Select only one layer to edit it.")

    def push_group_layer_message():
        self.statbar.push(1, "Group layers cannot hold features. Select a data-layer for editing.")

    def push_messages():
        #Pick the instruction that matches the current edit-mode and
        #selection state; outside edit mode the bar is cleared.
        if self.draw_features == True:
            if len(row_list) == 1:
                row = row_list[0]
                lyr_obj = model[row][3]
                if lyr_obj == None:
                    push_group_layer_message()
                else:
                    push_drawing_message()
            elif len(row_list) > 1:
                push_one_layer_message()
            else:
                push_select_layer_message()
        else:
            self.statbar.push(1, (""))

    def push_stereo_coordinates(mpl_event):
        #Show the cursor position as zero-padded dip-direction/dip.
        alpha_deg, gamma_deg = self.convert_xy_to_dirdip(mpl_event)
        alpha_deg = int(alpha_deg)
        gamma_deg = int(gamma_deg)
        #Ensure 000/00 formatting
        alpha_deg = str(alpha_deg).rjust(3, "0")
        gamma_deg = str(gamma_deg).rjust(2, "0")
        self.statbar.push(1, ("{0} / {1}".format(alpha_deg, gamma_deg)))

    def push_rose_coordinates(mpl_event):
        self.statbar.push(1, (_("Rose Diagram")))

    def push_mpl_event(mpl_event):
        #The axes are identified via their hidden titles set in
        #redraw_plot's clear_* helpers.
        title = mpl_event.inaxes.get_title()
        if title == "ax_stereo":
            push_stereo_coordinates(mpl_event)
        elif title == "ax_rose":
            push_rose_coordinates(mpl_event)
        elif title == "ax_fluc":
            self.statbar.push(1, ("Fluctuation Histogram"))
        elif title == "ax_mohr":
            self.statbar.push(1, ("Mohr Circle"))
        else:
            pass

    if mpl_event is not None:
        if mpl_event.inaxes is not None:
            push_mpl_event(mpl_event)
        else:
            push_messages()
    else:
        push_messages()
def on_toolbutton_file_parse_clicked(self, toolbutton):
    """
    Opens the filechooserdialog for parsing text files.

    Triggered from the GUI. Requires exactly one selected layer;
    otherwise a hint is pushed to the statusbar.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    count = len(row_list)
    if count == 1:
        fc = FileChooserParse(self.run_file_parser, self.main_window)
        fc.run()
    elif count == 0:
        self.statbar.push(1, ("Please select a layer to add data to."))
        self.canvas.draw()
    else:
        self.statbar.push(1,
            ("Please select only one layer to add data to."))
        self.canvas.draw()
def run_file_parser(self, text_file):
    """
    Passes a file chosen in the filechooserdialog to the parsing dialog.

    Does nothing unless exactly one layer is selected.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    if len(row_list) != 1:
        return
    lyr_obj = model[row_list[0]][3]
    fp = FileParseDialog(text_file, lyr_obj, self.redraw_plot,
                         self.add_planar_feature,
                         self.add_linear_feature,
                         self.add_faultplane_feature, self.main_window)
    fp.run()
def on_toolbutton_export_clicked(self, toolbutton):
    # pylint: disable=unused-argument
    """
    Runs the FileChooserExport dialog.

    Triggered when the user clicks on the toolbutton_export. Requires
    exactly one selected layer; otherwise a hint is pushed to the
    statusbar. The plot is redrawn before the dialog opens.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    count = len(row_list)
    if count == 1:
        self.redraw_plot()
        FileChooserExport(self.export_data, self.main_window).run()
    elif count == 0:
        self.statbar.push(1, ("Please select a layer to export."))
        self.canvas.draw()
    else:
        self.statbar.push(1, ("Please select only one layer to export."))
        self.canvas.draw()
def export_data(self, save_location):
    """
    Exports the selected layer's data to a CSV file at save_location.

    This method receives a save_location from the FileExportDialog class.
    A CSV file is created at that location. The header fields and the
    per-row dictionary depend on the layer type; the method then iterates
    over all rows of the layer's datastore.
    """
    selection = self.layer_view.get_selection()
    model, row_list = selection.get_selected_rows()
    lyr_obj = model[row_list[0]][3]
    data_obj = lyr_obj.get_data_treestore()
    #Per layer type: (CSV header, row -> dict converter).
    formats = {
        "plane": (
            ["dip-direction", "dip", "stratigraphy"],
            lambda r: {"dip-direction": r[0], "dip": r[1],
                       "stratigraphy": r[2]}),
        "line": (
            ["dip-direction", "dip", "sense"],
            lambda r: {"dip-direction": r[0], "dip": r[1],
                       "sense": r[2]}),
        "faultplane": (
            ["plane-dip-direction", "plane-dip",
             "linear-dip-direction", "linear-dip", "linear-sense"],
            lambda r: {"plane-dip-direction": r[0],
                       "plane-dip": r[1],
                       "linear-dip-direction": r[2],
                       "linear-dip": r[3],
                       "linear-sense": r[4]}),
        "smallcircle": (
            ["dip-direction", "dip", "opening-angle"],
            lambda r: {"dip-direction": r[0],
                       "dip": r[1],
                       "opening-angle": r[2]}),
    }
    with open(save_location, "w", newline="") as csvfile:
        lyr_type = lyr_obj.get_layer_type()
        if lyr_type in formats:
            fieldnames, build_row = formats[lyr_type]
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

            def write_row(data_model, path, itr):
                #Must not return a truthy value: TreeModel.foreach
                #stops iterating when the callback returns True.
                writer.writerow(build_row(data_model[path]))

            data_obj.foreach(write_row)
def on_eb_lbl_layerview_button_press_event(self, eventbox, eventbutton):
    """
    Clicking on the layerview label deselects all layers.
    """
    self.layer_view.get_selection().unselect_all()
def on_eb_lbl_dataview_button_press_event(self, eventbox, eventbutton):
    """
    Clicking on the dataview label deselects all layers.
    """
    self.layer_view.get_selection().unselect_all()
def on_menuitem_online_help_activate(self, menuitem):
    # pylint: disable=unused-argument
    """
    Opens a new browser tab with the online help.

    Triggered when the user clicks "Help -> View Online Help" in the
    MenuBar.
    """
    webbrowser.open_new_tab("http://innstereo.readthedocs.org")
def on_menuitem_website_activate(self, menuitem):
    # pylint: disable=unused-argument
    """
    Opens a new browser tab with the website of InnStereo.

    Triggered when the user clicks "Help -> Visit the Website" in the
    MenuBar.
    """
    webbrowser.open_new_tab("http://innstereo.github.io/")
def on_menuitem_report_bug_activate(self, menuitem):
    # pylint: disable=unused-argument
    """
    Opens a new browser tab with the bug tracker.

    Triggered when the user clicks "Help -> Report a Bug" in the
    MenuBar.
    """
    webbrowser.open_new_tab("https://github.com/innstereo/innstereo/issues")
def startup(testing=False):
    """
    Starts the GUI and the application main-loop.

    Initializes an instance of the Gtk.Builder and loads the GUI from the
    ".glade" file. Then it initializes the main window and starts the Gtk
    main loop. This function is also passed to the window, so it can open
    up new instances of the program. Returns the MainWindow instance
    (used directly by the test-suite when testing is True).
    """
    builder = Gtk.Builder()
    glade_file = os.path.join(os.path.dirname(__file__), "gui_layout.glade")
    builder.set_translation_domain(i18n().get_ts_domain())
    builder.add_objects_from_file(glade_file,
        ("main_window", "image_new_plane", "image_new_faultplane",
        "image_new_line", "image_new_fold", "image_plane_intersect",
        "image_best_fitting_plane", "layer_right_click_menu",
        "image_create_small_circle", "menu_plot_views", "image_eigenvector",
        "poles_to_lines", "image_linears_to_planes", "image_rotate",
        "image_pt_axis", "image_mean_vector", "image_fisher"))
    gui_instance = MainWindow(builder, testing)
    builder.connect_signals(gui_instance)
    if not testing:
        Gtk.main()
    return gui_instance
if __name__ == "__main__":
    #Start the application when the module is run directly as a script.
    startup()
| gpl-2.0 |
cg31/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 5 | 39527 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.ops import math_ops
def _prepare_iris_data_for_logistic_regression():
  """Reduce the iris data set to its two-class (0 vs. 1) subproblem."""
  full = tf.contrib.learn.datasets.load_iris()
  binary_ids = np.where((full.target == 0) | (full.target == 1))
  return tf.contrib.learn.datasets.base.Dataset(
      data=full.data[binary_ids], target=full.target[binary_ids])
def _iris_input_logistic_fn():
  """Input fn feeding the 100-sample binary iris subset as one dense column."""
  dataset = _prepare_iris_data_for_logistic_regression()
  features = {'feature': tf.constant(dataset.data, dtype=tf.float32)}
  labels = tf.constant(dataset.target, shape=[100, 1], dtype=tf.int32)
  return features, labels
def _iris_input_multiclass_fn():
  """Input fn feeding the full 150-sample, three-class iris data set."""
  dataset = tf.contrib.learn.datasets.load_iris()
  features = {'feature': tf.constant(dataset.data, dtype=tf.float32)}
  labels = tf.constant(dataset.target, shape=[150, 1], dtype=tf.int32)
  return features, labels
class DNNClassifierTest(tf.test.TestCase):
  """End-to-end tests for tf.contrib.learn.DNNClassifier.

  Fixed tf_random_seed values keep the trained models deterministic so the
  accuracy/loss thresholds below are stable.
  """

  def testEstimatorContract(self):
    # Verifies the public Estimator interface (fit/evaluate/predict, ...).
    estimator_test_utils.assert_estimator_contract(
        self, tf.contrib.learn.DNNClassifier)

  def testLogisticRegression_MatrixData(self):
    """Tests binary classification using matrix data as input."""
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=cont_features,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
    self.assertLess(scores['loss'], 0.3)

  def testLogisticRegression_MatrixData_Target1D(self):
    """Same as the last test, but target shape is [100] instead of [100, 1]."""
    def _input_fn():
      iris = _prepare_iris_data_for_logistic_regression()
      return {
          'feature': tf.constant(iris.data, dtype=tf.float32)
      }, tf.constant(iris.target, shape=[100], dtype=tf.int32)

    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=cont_features,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)

  def testLogisticRegression_NpMatrixData(self):
    """Tests binary classification using numpy matrix data as input."""
    iris = _prepare_iris_data_for_logistic_regression()
    train_x = iris.data
    train_y = iris.target
    # Empty-name column: the whole x matrix is treated as one dense feature.
    feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(x=train_x, y=train_y, steps=100)
    scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
    self.assertGreater(scores['accuracy'], 0.8)

  def testLogisticRegression_TensorData(self):
    """Tests binary classification using tensor data as input."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[.8], [0.2], [.1]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([[1], [0], [0]], dtype=tf.int32)

    language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    feature_columns = [
        tf.contrib.layers.embedding_column(language_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=2,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
    self.assertLess(scores['loss'], 0.3)
    # num_epochs=1 makes the prediction input fn finite so predict terminates.
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertListEqual(predictions, [1, 0, 0])

  def testLogisticRegression_FloatLabel(self):
    """Tests binary classification with float labels."""
    def _input_fn_float_label(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[50], [20], [10]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      target = tf.constant([[0.8], [0.], [0.2]], dtype=tf.float32)
      return features, target

    language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(language_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=2,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn_float_label, steps=1000)

    predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
    predictions_proba = list(
        classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
    # Prediction probabilities mirror the target column, which proves that the
    # classifier learns from float input.
    self.assertAllClose(
        predictions_proba, [[0.2, 0.8], [1., 0.], [0.8, 0.2]], atol=0.05)
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertListEqual(predictions, [1, 0, 0])

  def testMultiClass_MatrixData(self):
    """Tests multi-class classification using matrix data as input."""
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=cont_features,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)
    # Centered bias is enabled by default, so its variable must exist.
    self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
    scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.8)
    self.assertLess(scores['loss'], 0.3)

  def testMultiClass_MatrixData_Target1D(self):
    """Same as the last test, but target shape is [150] instead of [150, 1]."""
    def _input_fn():
      iris = tf.contrib.learn.datasets.load_iris()
      return {
          'feature': tf.constant(iris.data, dtype=tf.float32)
      }, tf.constant(iris.target, shape=[150], dtype=tf.int32)

    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=cont_features,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=200)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.8)

  def testMultiClass_NpMatrixData(self):
    """Tests multi-class classification using numpy matrix data as input."""
    iris = tf.contrib.learn.datasets.load_iris()
    train_x = iris.data
    train_y = iris.target
    feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    classifier.fit(x=train_x, y=train_y, steps=200)
    scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
    self.assertGreater(scores['accuracy'], 0.8)

  def testLoss(self):
    """Tests loss calculation."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The logistic prediction should be (y = 0.25).
      target = tf.constant([[1], [0], [0], [0]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
      }
      return features, target

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=2,
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
    # Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
    self.assertAlmostEqual(scores['loss'], 0.562, delta=0.1)

  def testLossWithWeights(self):
    """Tests loss calculation with weights."""
    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
      # The logistic prediction should be (y = 0.25).
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[1.], [1.], [1.], [1.]])
      }
      return features, target

    def _input_fn_eval():
      # 4 rows, with different weights.
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[7.], [1.], [1.], [1.]])
      }
      return features, target

    classifier = tf.contrib.learn.DNNClassifier(
        weight_column_name='w',
        n_classes=2,
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
    self.assertAlmostEqual(scores['loss'], 1.06, delta=0.1)

  def testTrainWithWeights(self):
    """Tests training with given weight column."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      target = tf.constant([[1], [0], [0], [0]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[100.], [3.], [2.], [2.]])
      }
      return features, target

    def _input_fn_eval():
      # Create 4 rows (y = x)
      target = tf.constant([[1], [1], [1], [1]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[1.], [1.], [1.], [1.]])
      }
      return features, target

    classifier = tf.contrib.learn.DNNClassifier(
        weight_column_name='w',
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    # The model should learn (y = x) because of the weights, so the accuracy
    # should be close to 1.
    self.assertGreater(scores['accuracy'], 0.9)

  def testPredict_AsIterableFalse(self):
    """Tests predict and predict_prob methods with as_iterable=False."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([[1], [0], [0]], dtype=tf.int32)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    classifier.fit(input_fn=_input_fn, steps=100)

    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
    self.assertLess(scores['loss'], 0.3)
    predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
    self.assertListEqual(list(predictions), [1, 0, 0])
    predictions = classifier.predict_proba(input_fn=_input_fn,
                                           as_iterable=False)
    self.assertAllClose(
        predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.1)

  def testPredict_AsIterable(self):
    """Tests predict and predict_prob methods with as_iterable=True."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([[1], [0], [0]], dtype=tf.int32)

    language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(language_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=200)

    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
    self.assertLess(scores['loss'], 0.3)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertListEqual(predictions, [1, 0, 0])
    predictions = list(
        classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
    self.assertAllClose(
        predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)

  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""
    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      target = tf.constant([[1], [0], [0], [0]])
      features = {
          'x': tf.train.limit_epochs(
              tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
      }
      return features, target

    def _my_metric_op(predictions, targets):
      # For the case of binary classification, the 2nd column of "predictions"
      # denotes the model predictions.
      targets = tf.to_float(targets)
      predictions = tf.slice(predictions, [0, 1], [-1, 1])
      # Align dtypes before multiplying (predictions may not be float32).
      targets = math_ops.cast(targets, predictions.dtype)
      return tf.reduce_sum(tf.mul(predictions, targets))

    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=_input_fn,
        steps=100,
        metrics={
            'my_accuracy': MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_accuracy,
                prediction_key='classes'),
            'my_precision': MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_precision,
                prediction_key='classes'),
            'my_metric': MetricSpec(
                metric_fn=_my_metric_op,
                prediction_key='probabilities')
        })
    self.assertTrue(
        set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
            ]).issubset(set(scores.keys())))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
    self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
                     scores['my_accuracy'])

    # Test the case where the 2nd element of the key is neither "classes" nor
    # "probabilities".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              'bad_name': MetricSpec(
                  metric_fn=tf.contrib.metrics.streaming_auc,
                  prediction_key='bad_type')})

  def testTrainSaveLoad(self):
    """Tests that insures you can save and reload a trained model."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([[1], [0], [0]], dtype=tf.int32)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]

    model_dir = tempfile.mkdtemp()
    classifier = tf.contrib.learn.DNNClassifier(
        model_dir=model_dir,
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=100)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions1 = classifier.predict(input_fn=predict_input_fn)
    del classifier

    # A fresh estimator pointed at the same model_dir must restore the
    # checkpoint and reproduce the predictions exactly.
    classifier2 = tf.contrib.learn.DNNClassifier(
        model_dir=model_dir,
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    predictions2 = classifier2.predict(input_fn=predict_input_fn)
    self.assertEqual(list(predictions1), list(predictions2))

  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([[1], [0], [0]], dtype=tf.int32)

    # The given hash_bucket_size results in variables larger than the
    # default min_slice_size attribute, so the variables are partitioned.
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=2e7)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        # Because we did not start a distributed cluster, we need to pass an
        # empty ClusterSpec, otherwise the device_setter will look for
        # distributed jobs, such as "/job:ps" which are not present.
        config=tf.contrib.learn.RunConfig(
            num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
            tf_random_seed=5))

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
    self.assertLess(scores['loss'], 0.3)

  def testExport(self):
    """Tests export model for servo."""
    def input_fn():
      return {
          'age': tf.constant([1]),
          'language': tf.SparseTensor(values=['english'],
                                      indices=[[0, 0]],
                                      shape=[1, 1])
      }, tf.constant([[1]])

    language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
    feature_columns = [
        tf.contrib.layers.real_valued_column('age'),
        tf.contrib.layers.embedding_column(language, dimension=1)
    ]

    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[3, 3])
    classifier.fit(input_fn=input_fn, steps=100)

    export_dir = tempfile.mkdtemp()
    classifier.export(export_dir)

  def testDisableCenteredBias(self):
    """Tests that we can disable centered bias."""
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    classifier = tf.contrib.learn.DNNClassifier(
        n_classes=3,
        feature_columns=cont_features,
        hidden_units=[3, 3],
        enable_centered_bias=False,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)
    # With centered bias disabled its variable must not be created.
    self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
    scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.8)
    self.assertLess(scores['loss'], 0.3)
class DNNRegressorTest(tf.test.TestCase):
  """End-to-end tests for tf.contrib.learn.DNNRegressor.

  Fixed tf_random_seed values keep the trained models deterministic so the
  loss thresholds below are stable.
  """

  def testEstimatorContract(self):
    # Verifies the public Estimator interface (fit/evaluate/predict, ...).
    estimator_test_utils.assert_estimator_contract(
        self, tf.contrib.learn.DNNRegressor)

  def testRegression_MatrixData(self):
    """Tests regression using matrix data as input."""
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=cont_features,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_iris_input_logistic_fn, steps=200)
    scores = regressor.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
    self.assertLess(scores['loss'], 0.3)

  def testRegression_MatrixData_Target1D(self):
    """Same as the last test, but target shape is [100] instead of [100, 1]."""
    def _input_fn():
      iris = _prepare_iris_data_for_logistic_regression()
      return {
          'feature': tf.constant(iris.data, dtype=tf.float32)
      }, tf.constant(iris.target, shape=[100], dtype=tf.int32)

    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=cont_features,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=200)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.3)

  def testRegression_NpMatrixData(self):
    """Tests regression using numpy matrix data as input."""
    iris = _prepare_iris_data_for_logistic_regression()
    train_x = iris.data
    train_y = iris.target
    feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    regressor.fit(x=train_x, y=train_y, steps=200)
    scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
    self.assertLess(scores['loss'], 0.3)

  def testRegression_TensorData(self):
    """Tests regression using tensor data as input."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[.8], [.15], [0.]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([1., 0., 0.2], dtype=tf.float32)

    language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(language_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=200)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.3)

  def testLoss(self):
    """Tests loss calculation."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
      }
      return features, target

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
    # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
    self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)

  def testLossWithWeights(self):
    """Tests loss calculation with weights."""
    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[1.], [1.], [1.], [1.]])
      }
      return features, target

    def _input_fn_eval():
      # 4 rows, with different weights.
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[7.], [1.], [1.], [1.]])
      }
      return features, target

    regressor = tf.contrib.learn.DNNRegressor(
        weight_column_name='w',
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
    self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)

  def testTrainWithWeights(self):
    """Tests training with given weight column."""
    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[100.], [3.], [2.], [2.]])
      }
      return features, target

    def _input_fn_eval():
      # Create 4 rows (y = x)
      target = tf.constant([[1.], [1.], [1.], [1.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[1.], [1.], [1.], [1.]])
      }
      return features, target

    regressor = tf.contrib.learn.DNNRegressor(
        weight_column_name='w',
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # The model should learn (y = x) because of the weights, so the loss should
    # be close to zero.
    self.assertLess(scores['loss'], 0.2)

  def testPredict_AsIterableFalse(self):
    """Tests predict method with as_iterable=False."""
    target = [1., 0., 0.2]
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant(target, dtype=tf.float32)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    regressor.fit(input_fn=_input_fn, steps=200)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.2)
    predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
    self.assertAllClose(predictions, target, atol=0.2)

  def testPredict_AsIterable(self):
    """Tests predict method with as_iterable=True."""
    target = [1., 0., 0.2]
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant(target, dtype=tf.float32)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    regressor.fit(input_fn=_input_fn, steps=200)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.2)
    # num_epochs=1 makes the prediction input fn finite so predict terminates.
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(
        regressor.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertAllClose(predictions, target, atol=0.2)

  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""
    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.train.limit_epochs(
              tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
      }
      return features, target

    def _my_metric_op(predictions, targets):
      return tf.reduce_sum(tf.mul(predictions, targets))

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            'my_error': tf.contrib.metrics.streaming_mean_squared_error,
            'my_metric': _my_metric_op
        })
    self.assertIn('loss', set(scores.keys()))
    self.assertIn('my_error', set(scores.keys()))
    self.assertIn('my_metric', set(scores.keys()))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
    self.assertAlmostEqual(
        _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
        scores['my_error'])

    # Tests that when the key is a tuple, an error is raised.
    with self.assertRaises(KeyError):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={('my_error', 'predictions'):
                   tf.contrib.metrics.streaming_mean_squared_error})

  def testTrainSaveLoad(self):
    """Tests that insures you can save and reload a trained model."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([1., 0., 0.2], dtype=tf.float32)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    model_dir = tempfile.mkdtemp()
    regressor = tf.contrib.learn.DNNRegressor(
        model_dir=model_dir,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    regressor.fit(input_fn=_input_fn, steps=100)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(regressor.predict(input_fn=predict_input_fn))
    del regressor

    # A fresh estimator pointed at the same model_dir must restore the
    # checkpoint and reproduce the predictions.
    regressor2 = tf.contrib.learn.DNNRegressor(
        model_dir=model_dir,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))
    predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
    self.assertAllClose(predictions, predictions2)

  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([1., 0., 0.2], dtype=tf.float32)

    # The given hash_bucket_size results in variables larger than the
    # default min_slice_size attribute, so the variables are partitioned.
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=2e7)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        # Because we did not start a distributed cluster, we need to pass an
        # empty ClusterSpec, otherwise the device_setter will look for
        # distributed jobs, such as "/job:ps" which are not present.
        config=tf.contrib.learn.RunConfig(
            num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
            tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=100)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.3)

  def testDisableCenteredBias(self):
    """Tests that we can disable centered bias."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(
              tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
          'language': tf.SparseTensor(
              values=tf.train.limit_epochs(
                  ['en', 'fr', 'zh'], num_epochs=num_epochs),
              indices=[[0, 0], [0, 1], [2, 0]],
              shape=[3, 2])
      }
      return features, tf.constant([1., 0., 0.2], dtype=tf.float32)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        enable_centered_bias=False,
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))

    regressor.fit(input_fn=_input_fn, steps=200)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.3)
def boston_input_fn():
  """Returns (features, target) float32 tensors for the Boston housing data."""
  boston = tf.contrib.learn.datasets.load_boston()
  # 13 feature columns per example; target reshaped to a single column.
  x = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
  y = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
  return x, y
class FeatureColumnTest(tf.test.TestCase):
  """Checks that feature columns inferred from an input_fn can train."""

  def testTrain(self):
    inferred_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    estimator = tf.contrib.learn.DNNRegressor(
        feature_columns=inferred_columns, hidden_units=[3, 3])
    estimator.fit(input_fn=boston_input_fn, steps=1)
    _ = estimator.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/MHDfluid.py | 1 | 17145 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
# from MatrixOperations import *
import numpy as np
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import scipy.sparse as sp
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
import HartmanChannel
import ExactSol
# import matplotlib.pyplot as plt
#@profile
m = 4
set_log_active(False)
errL2u = np.zeros((m-1,1))
errH1u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
errL2b = np.zeros((m-1,1))
errCurlb = np.zeros((m-1,1))
errL2r = np.zeros((m-1,1))
errH1r = np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder = np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
DimSave = np.zeros((m-1,4))
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0] = 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx + 0
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
L = 10.
y0 = 2.
z0 = 1.
mesh, boundaries, domains = HartmanChannel.Domain(nn)
parameters['form_compiler']['quadrature_degree'] = -1
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorElement("CG", mesh.ufl_cell(), order)
Pressure = FiniteElement("CG", mesh.ufl_cell(), order-1)
Magnetic = FiniteElement("N1curl", mesh.ufl_cell(), order-1)
Lagrange = FiniteElement("CG", mesh.ufl_cell(), order-1)
VelocityF = VectorFunctionSpace(mesh, "CG", order)
PressureF = FunctionSpace(mesh, "CG", order-1)
MagneticF = FunctionSpace(mesh, "N1curl", order-1)
LagrangeF = FunctionSpace(mesh, "CG", order-1)
W = FunctionSpace(mesh, MixedElement([Velocity, Pressure, Magnetic,Lagrange]))
Velocitydim[xx-1] = W.sub(0).dim()
Pressuredim[xx-1] = W.sub(1).dim()
Magneticdim[xx-1] = W.sub(2).dim()
Lagrangedim[xx-1] = W.sub(3).dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [W.sub(0).dim(), W.sub(1).dim(), W.sub(2).dim(), W.sub(3).dim()]
def boundary(x, on_boundary):
return on_boundary
FSpaces = [VelocityF,PressureF,MagneticF,LagrangeF]
DimSave[xx-1,:] = np.array(dim)
kappa = 1.0
Mu_m = 10.0
MU = 1.0
N = FacetNormal(mesh)
IterType = 'Full'
params = [kappa,Mu_m,MU]
n = FacetNormal(mesh)
trunc = 4
u0, p0, b0, r0, pN, Laplacian, Advection, gradPres, NS_Couple, CurlCurl, gradR, M_Couple = HartmanChannel.ExactSolution(mesh, params)
# kappa = 0.0
# params = [kappa,Mu_m,MU]
# u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(1, 1)
MO.PrintStr("Seting up initial guess matricies",2,"=","\n\n","\n")
BCtime = time.time()
BC = MHDsetup.BoundaryIndices(mesh)
MO.StrTimePrint("BC index function, time: ", time.time()-BCtime)
Hiptmairtol = 1e-6
HiptmairMatrices = PrecondSetup.MagneticSetup(mesh, Magnetic, Lagrange, b0, r0, Hiptmairtol, params)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
F_NS = -MU*Laplacian + Advection + gradPres - kappa*NS_Couple
if kappa == 0.0:
F_M = Mu_m*CurlCurl + gradR - kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl + gradR - kappa*M_Couple
u_k, p_k = HartmanChannel.Stokes(Velocity, Pressure, F_NS, u0, 1, params, mesh, boundaries, domains)
b_k, r_k = HartmanChannel.Maxwell(Magnetic, Lagrange, F_M, b0, r0, params, mesh, HiptmairMatrices, Hiptmairtol)
(u, p, b, r) = TrialFunctions(W)
(v, q, c, s) = TestFunctions(W)
# Curl-curl block of the Maxwell bilinear form. Scale by kappa only in the
# coupled case, matching the F_M forcing term built above.
# Bug fix: removed a stray no-op expression statement `r` (dead code) that
# sat in the else branch.
if kappa == 0.0:
    m11 = params[1]*inner(curl(b),curl(c))*dx
else:
    m11 = params[1]*params[0]*inner(curl(b),curl(c))*dx
m21 = inner(c,grad(r))*dx
m12 = inner(b,grad(s))*dx
a11 = params[2]*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1./2)*div(u_k)*inner(u,v)*dx - (1./2)*inner(u_k,n)*inner(u,v)*ds
a12 = -div(v)*p*dx
a21 = -div(u)*q*dx
CoupleT = params[0]*(v[0]*b_k[1]-v[1]*b_k[0])*curl(b)*dx
Couple = -params[0]*(u[0]*b_k[1]-u[1]*b_k[0])*curl(c)*dx
a = m11 + m12 + m21 + a11 + a21 + a12 #+ Couple + CoupleT
Lns = inner(v, F_NS)*dx #- inner(pN*n,v)*ds(2)
Lmaxwell = inner(c, F_M)*dx
# Residual form of the curl-curl block. Bug fix: the branches were inverted
# relative to the F_M and bilinear-form definitions above -- the kappa==0
# branch multiplied by params[0] (= kappa), which zeroed the block entirely.
# The scaling must mirror m11 in the bilinear form: no kappa factor when
# kappa == 0, Mu_m*kappa otherwise.
if kappa == 0.0:
    m11 = params[1]*inner(curl(b_k),curl(c))*dx
else:
    m11 = params[1]*params[0]*inner(curl(b_k),curl(c))*dx
m21 = inner(c,grad(r_k))*dx
m12 = inner(b_k,grad(s))*dx
a11 = params[2]*inner(grad(v), grad(u_k))*dx + inner((grad(u_k)*u_k),v)*dx + (1./2)*div(u_k)*inner(u_k,v)*dx - (1./2)*inner(u_k,n)*inner(u_k,v)*ds
a12 = -div(v)*p_k*dx
a21 = -div(u_k)*q*dx
CoupleT = params[0]*(v[0]*b_k[1]-v[1]*b_k[0])*curl(b_k)*dx
Couple = -params[0]*(u_k[0]*b_k[1]-u_k[1]*b_k[0])*curl(c)*dx
L = Lns + Lmaxwell - (m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT)
ones = Function(PressureF)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(PressureF, MU, mesh)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(PressureF, MU, u_k, mesh)
IS = MO.IndexSet(W, 'Blocks')
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 5 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
u_is = PETSc.IS().createGeneral(W.sub(0).dofmap().dofs())
p_is = PETSc.IS().createGeneral(W.sub(1).dofmap().dofs())
b_is = PETSc.IS().createGeneral(W.sub(2).dofmap().dofs())
r_is = PETSc.IS().createGeneral(W.sub(3).dofmap().dofs())
NS_is = PETSc.IS().createGeneral(range(VelocityF.dim()+PressureF.dim()))
M_is = PETSc.IS().createGeneral(range(VelocityF.dim()+PressureF.dim(),W.dim()))
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0"), degree=4), boundary)
bcp = DirichletBC(W.sub(1),Expression(("0.0"), degree=4), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0"),degree=4), boundary)
bcr = DirichletBC(W.sub(3),Expression("0.0",degree=4), boundary)
bcs = [bcu, bcb, bcr]
OuterTol = 1e-5
InnerTol = 1e-5
NSits = 0
Mits = 0
TotalStart = time.time()
SolutionTime = 0
bcu1 = DirichletBC(VelocityF,Expression(("0.0","0.0"), degree=4), boundary)
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
# initial = Function(W)
# R = action(a,initial);
# DR = derivative(R, initial);
A, b = assemble_system(a, L, bcs)
A, b = CP.Assemble(A,b)
u = b.duplicate()
# u.setRandom()
print " Max rhs = ",np.max(b.array)
MO.PrintStr("residual "+str(b.norm()),40,"=","\n\n","\n\n")
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(PressureF, MU, u_k, mesh)
b_t = TrialFunction(VelocityF)
c_t = TestFunction(VelocityF)
n = FacetNormal(mesh)
# mat = as_matrix([[b_k[1]*b_k[1],-b_k[1]*b_k[0]],[-b_k[1]*b_k[0],b_k[0]*b_k[0]]])
# aa = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1./2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1./2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
# ShiftedMass = assemble(aa)
# bcu1.apply(ShiftedMass)
# ShiftedMass = CP.Assemble(ShiftedMass)
ShiftedMass = A.getSubMatrix(u_is, u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(ShiftedMass)
Options = 'p4'
# print (u11-u).norm()
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Direct',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
Soltime = time.time() - stime
MO.StrTimePrint("MHD solve, time: ", Soltime)
Mits += mits
# Bug fix: was "NSits += mits", which double-counted the Maxwell iteration
# count; nsits (returned by S.solve above) was never used otherwise.
NSits += nsits
SolutionTime += Soltime
# u = IO.arrayToVec( u)
u1, p1, b1, r1, eps = Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld = np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
if iter > 1:
p11 = u.getSubVector(p_is)
p22 = u11.getSubVector(p_is)
P1 = Function(PressureF)
P1.vector()[:] = p11.array
P1.vector()[:] += - assemble(P1*dx)/assemble(ones*dx)
# x11 = np.concatenate((u.getSubVector(u_is).array, P, u.getSubVector(b_is).array, u.getSubVector(r_is).array), axis=0)
P2 = Function(PressureF)
P2.vector()[:] = p22.array
P2.vector()[:] += - assemble(P2*dx)/assemble(ones*dx)
# x22 = np.concatenate((u11.getSubVector(u_is).array, P, u11.getSubVector(b_is).array, u11.getSubVector(r_is).array), axis=0)
print P1, P2
U = u.getSubVector(u_is).array - u11.getSubVector(u_is).array
P = P1.vector().array() - P2.vector().array()
B = u.getSubVector(b_is).array - u11.getSubVector(b_is).array
R = u.getSubVector(r_is).array - u11.getSubVector(r_is).array
print np.linalg.norm(U)/VelocityF.dim(), " ", np.linalg.norm(P)/PressureF.dim(), " ", np.linalg.norm(B)/MagneticF.dim(), " ", np.linalg.norm(R)/LagrangeF.dim()
u11 = u
# X = x
# x = IO.arrayToVec(uOld)
# w = Function(W)
# # u2 = np.concatenate((u1.vector().array(),p1.vector().array(),b1.vector().array(),r1.vector().array()), axis=0)
# # u1 = np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
# A, b = assemble_system(a, L, bcs)
# A, b = CP.Assemble(A,b)
# print (A*u-b).norm()
# w.vector()[:] = ((A*u-b).array)
# eps = sqrt(assemble(inner(w,w)*dx))
# print "2222222 ", eps
# eps = b.norm()/normb
# iter = 1
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
ExactSolution = [u0,p0,b0,r0]
errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "CG")
print float(Wdim[xx-1][0])/Wdim[xx-2][0]
if xx > 1:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])/np.log2((float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])**(1./2)))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])/np.log2((float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])**(1./2)))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])/np.log2((float(Pressuredim[xx-1][0])/Pressuredim[xx-2][0])**(1./2)))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1])/np.log2((float(Magneticdim[xx-1][0])/Magneticdim[xx-2][0])**(1./2)))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1])/np.log2((float(Magneticdim[xx-1][0])/Magneticdim[xx-2][0])**(1./2)))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1])/np.log2((float(Lagrangedim[xx-1][0])/Lagrangedim[xx-2][0])**(1./2)))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1])/np.log2((float(Lagrangedim[xx-1][0])/Lagrangedim[xx-2][0])**(1./2)))
import pandas as pd
LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable.to_latex()
print "\n\n Magnetic convergence"
MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
print MagneticTable.to_latex()
print "\n\n Lagrange convergence"
LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
LagrangeValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
pd.set_option('precision',3)
LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,"L2-order","%1.2f")
LagrangeTable = MO.PandasFormat(LagrangeTable,'H1-order',"%1.2f")
print LagrangeTable.to_latex()
#
#
#
import pandas as pd
# p = plot(u_k)
# p.write_png()
# p = plot(p_k)
# p.write_png()
# # p = plot(b_k)
# # p.write_png()
# # p = plot(r_k)
# # p.write_png()
# p = plot(interpolate(u0,Velocity))
# p.write_png()
# p = plot(interpolate(p0,Pressure))
# p.write_png()
# # p = plot(interpolate(b0,Magnetic))
# # p.write_png()
# # p = plot(interpolate(r0,Lagrange))
# # p.write_png()
# sss
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
MO.StoreMatrix(DimSave, "dim")
file = File("u_k.pvd")
file << u_k
file = File("p_k.pvd")
file << p_k
file = File("b_k.pvd")
file << b_k
file = File("r_k.pvd")
file << r_k
file = File("u0.pvd")
file << interpolate(u0, VelocityF)
file = File("p0.pvd")
file << interpolate(p0, PressureF)
file = File("b0.pvd")
file << interpolate(b0, MagneticF)
file = File("r0.pvd")
file << interpolate(r0, LagrangeF)
interactive()
# \begin{tabular}{lrrrrrll}
# \toprule
# {} & l & DoF & AV solve Time & Total picard time & picard iterations & Av Outer its & Av Inner its \\
# \midrule
# 0 & 4.0 & 3.556e+03 & 0.888 & 5.287 & 5.0 & 28.4 & 28.4 \\
# 1 & 5.0 & 1.376e+04 & 7.494 & 38.919 & 5.0 & 26.8 & 26.8 \\
# 2 & 6.0 & 5.415e+04 & 42.334 & 217.070 & 5.0 & 28.8 & 28.8 \\
# 3 & 7.0 & 2.148e+05 & 196.081 & 1001.671 & 5.0 & 28.4 & 28.4 \\
# 4 & 8.0 & 8.556e+05 & 843.574 & 4294.126 & 5.0 & 28.2 & 28.2 \\
# 5 & 9.0 & 3.415e+06 & 3865.731 & 15683.881 & 4.0 & 28.2 & 28.2 \\
# \bottomrule
# \end{tabular}
| mit |
josherick/bokeh | bokeh/compat/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
    """Initialize the IPython notebook display elements.

    Injects the require.js configuration and the d3 / topojson / vega script
    loads into the current notebook so that vega-based plots can render.

    This function borrows heavily from the excellent vincent package:
    http://github.com/wrobstory/vincent
    """
    try:
        from IPython.core.display import display, HTML
    except ImportError:
        print('IPython Notebook could not be loaded.')
        # Bug fix: bail out here. Previously execution fell through and the
        # display()/HTML() calls below raised NameError when IPython was
        # unavailable.
        return

    # require.js bootstrap for d3 and topojson; doubled braces survive the
    # .format() call below, {0} is replaced by dep_libs.
    require_js = '''
    if (window['d3'] === undefined) {{
        require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
        require(["d3"], function(d3) {{
          window.d3 = d3;
          {0}
        }});
    }};
    if (window['topojson'] === undefined) {{
        require.config(
            {{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
        );
        require(["topojson"], function(topojson) {{
           window.topojson = topojson;
        }});
    }};
    '''
    d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
    d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
                              "d3.layout.cloud.js")
    topojson_js_url = "http://d3js.org/topojson.v1.min.js"
    vega_js_url = 'http://trifacta.github.com/vega/vega.js'

    # Chain the four script loads, then fire the vincent "loaded" event.
    dep_libs = '''$.getScript("%s", function() {
        $.getScript("%s", function() {
            $.getScript("%s", function() {
                $.getScript("%s", function() {
                    $([IPython.events]).trigger("vega_loaded.vincent");
                })
            })
        })
    });''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
              topojson_js_url, vega_js_url)
    load_js = require_js.format(dep_libs)
    html = '<script>'+load_js+'</script>'
    display(HTML(html))
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/series/methods/test_update.py | 2 | 4213 | import numpy as np
import pytest
from pandas import CategoricalDtype, DataFrame, NaT, Series, Timestamp
import pandas._testing as tm
class TestUpdate:
    """Tests for Series.update across alignment, dtypes and extension arrays."""

    def test_update(self):
        """Basic update: only positions where the other Series is non-NaN change."""
        s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
        s2 = Series([np.nan, 3.5, np.nan, 5.0])
        s.update(s2)

        expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
        tm.assert_series_equal(s, expected)

        # GH 3217
        # Updating a column extracted from a DataFrame must write through to
        # the parent frame.
        df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
        df["c"] = np.nan

        df["c"].update(Series(["foo"], index=[0]))
        expected = DataFrame(
            [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
        )
        tm.assert_frame_equal(df, expected)

    @pytest.mark.parametrize(
        "other, dtype, expected",
        [
            # other is int
            ([61, 63], "int32", Series([10, 61, 12], dtype="int32")),
            ([61, 63], "int64", Series([10, 61, 12])),
            ([61, 63], float, Series([10.0, 61.0, 12.0])),
            ([61, 63], object, Series([10, 61, 12], dtype=object)),
            # other is float, but can be cast to int
            ([61.0, 63.0], "int32", Series([10, 61, 12], dtype="int32")),
            ([61.0, 63.0], "int64", Series([10, 61, 12])),
            ([61.0, 63.0], float, Series([10.0, 61.0, 12.0])),
            ([61.0, 63.0], object, Series([10, 61.0, 12], dtype=object)),
            # others is float, cannot be cast to int
            ([61.1, 63.1], "int32", Series([10.0, 61.1, 12.0])),
            ([61.1, 63.1], "int64", Series([10.0, 61.1, 12.0])),
            ([61.1, 63.1], float, Series([10.0, 61.1, 12.0])),
            ([61.1, 63.1], object, Series([10, 61.1, 12], dtype=object)),
            # other is object, cannot be cast
            ([(61,), (63,)], "int32", Series([10, (61,), 12])),
            ([(61,), (63,)], "int64", Series([10, (61,), 12])),
            ([(61,), (63,)], float, Series([10.0, (61,), 12.0])),
            ([(61,), (63,)], object, Series([10, (61,), 12])),
        ],
    )
    def test_update_dtypes(self, other, dtype, expected):
        """Updating keeps the original dtype when possible, else upcasts."""
        ser = Series([10, 11, 12], dtype=dtype)
        other = Series(other, index=[1, 3])
        ser.update(other)

        tm.assert_series_equal(ser, expected)

    @pytest.mark.parametrize(
        "series, other, expected",
        [
            # update by key
            (
                Series({"a": 1, "b": 2, "c": 3, "d": 4}),
                {"b": 5, "c": np.nan},
                Series({"a": 1, "b": 5, "c": 3, "d": 4}),
            ),
            # update by position
            (Series([1, 2, 3, 4]), [np.nan, 5, 1], Series([1, 5, 1, 4])),
        ],
    )
    def test_update_from_non_series(self, series, other, expected):
        # GH 33215
        # dicts update by key; list-likes update positionally.
        series.update(other)

        tm.assert_series_equal(series, expected)

    @pytest.mark.parametrize(
        "result, target, expected",
        [
            (
                Series(["a", None], dtype="string"),
                Series([None, "b"], dtype="string"),
                Series(["a", "b"], dtype="string"),
            ),
            (
                Series([1, None], dtype="Int64"),
                Series([None, 2], dtype="Int64"),
                Series([1, 2], dtype="Int64"),
            ),
            (
                Series([True, None], dtype="boolean"),
                Series([None, False], dtype="boolean"),
                Series([True, False], dtype="boolean"),
            ),
            (
                Series(["a", None], dtype=CategoricalDtype(categories=["a", "b"])),
                Series([None, "b"], dtype=CategoricalDtype(categories=["a", "b"])),
                Series(["a", "b"], dtype=CategoricalDtype(categories=["a", "b"])),
            ),
            (
                Series([Timestamp(year=2020, month=1, day=1, tz="Europe/London"), NaT]),
                Series([NaT, Timestamp(year=2020, month=1, day=1, tz="Europe/London")]),
                Series([Timestamp(year=2020, month=1, day=1, tz="Europe/London")] * 2),
            ),
        ],
    )
    def test_update_extension_array_series(self, result, target, expected):
        """Extension dtypes (string, Int64, boolean, categorical, tz-aware) survive update."""
        result.update(target)

        tm.assert_series_equal(result, expected)
| bsd-3-clause |
RUBi-ZA/MODE-TASK | assemblyCovariance.py | 1 | 14726 | #!/usr/bin/env python
# assemblyCovariance.py
# Calculates covariance matrices for the following:
# 1) over all modes
# OR
# 2) specified modes
# AND
# 3) For a single asymmetric unit
# OR
# 4) For a cluster of specified asymmetric units
# Author: Caroline Ross: caroross299@gmail.com
# August 2017
# Calculates and Returns Diagonals of Correlated Matrix for a given set of modes
import os
import argparse
from datetime import datetime
from lib.utils import *
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm as CM
import numpy as np
def plotCorrelation(M, fname, Vn, Vx, output):
    """Render a covariance/correlation matrix as a heat-map image.

    Parameters
    ----------
    M : 2D array-like
        Matrix to plot (converted to an ndarray for matplotlib).
    fname : str
        Name of the image file written under `output`.
    Vn : float
        Magnitude of the lower colour limit (plotted as -Vn).
    Vx : float
        Upper colour limit.
    output : str
        Output directory.
    """
    M = np.array(M)  # matplotlib requires an ndarray
    fig, ax = plt.subplots()
    # Colour scheme: white for the lowest band, then the jet ramp.
    colors = [('white')] + [(CM.jet(i)) for i in range(40, 250)]
    new_map = matplotlib.colors.LinearSegmentedColormap.from_list(
        'new_map', colors, N=300)
    # Bug fix: a second, plain plt.pcolor(M, vmin=-Vn, vmax=Vx) call used to
    # follow this line; it redrew the matrix with the default colormap and
    # silently discarded the custom one built above.
    heatmap = ax.pcolor(M, cmap=new_map, vmin=-Vn, vmax=Vx)
    fig = plt.gcf()
    fig.set_size_inches(10, 15)
    ax.set_frame_on(False)
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    ax.grid(False)
    # Turn off all the ticks
    ax = plt.gca()
    for t in ax.xaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
    for t in ax.yaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
    cbar = plt.colorbar(heatmap, orientation="horizontal")
    plt.savefig(output + "/" + fname, dpi=300)
    plt.close('all')  # release figure memory
def calcCovariance(wMatrix, vtMatrix, modes, definedUnits, specifiedUnits, zoom, vmin, vmax,output):
    """Compute and plot residue covariance matrices from an ANM decomposition.

    Reconstructs the pseudo-inverse C = U * W^-1 * V^T from the eigenvalue
    (W) and eigenvector (VT) text files written by ANM.cpp, collapses each
    3x3 (x,y,z) sub-block of C to its trace, and writes heat maps plus text
    dumps for: the full assembly, the selected asymmetric units, and an
    optional zoomed-in chain.

    Parameters
    ----------
    wMatrix : str -- path to the eigenvalue (W) file, one "index value" per line
    vtMatrix : str -- path to the eigenvector (VT) file, one row per line
    modes : 'all' or a list of 0-based mode indices to include
    definedUnits : dict -- 1-based unit number -> list of PDB ATOM lines
    specifiedUnits : list of 1-based unit numbers to extract
    zoom : str -- 'unit,chain' selector, or '0,0' for no zoom plot
    vmin, vmax : floats forwarded to plotCorrelation as colour limits
    output : str -- output directory

    NOTE(review): this is Python 2 code; the `/` divisions on total_modes
    below rely on integer-division semantics and would produce floats (and
    break np.zeros) under Python 3.
    """
    try:
        f = open(wMatrix, 'r')
        eigen_values = f.readlines()
        f.close()
    except IOError:
        # NOTE(review): protein_name is not defined in this scope -- reaching
        # this branch raises NameError instead of printing. TODO fix.
        print ('\n**************************************\nERROR!! '+protein_name+' W-MATRIX FILE: '+wMatrix+' NOT FOUND:\n**************************************\n')
        sys.exit()
    try:
        f = open(vtMatrix, 'r')
        eigen_vectors = f.readlines()
        f.close()
    except IOError:
        # NOTE(review): same undefined protein_name issue as above.
        print ('\n**************************************\nERROR!! '+protein_name+' VT-MATRIX FILE: '+vtMatrix+' NOT FOUND:\n**************************************\n')
        sys.exit()
    total_modes = len(eigen_values)
    # Diagonal W^-1: only the selected modes get a nonzero inverse eigenvalue.
    w_inv = np.zeros((total_modes, total_modes))
    try:
        if modes == 'all':
            #Create A Full W Inverse Matrix (This is if we want correlation averaged over all modes)
            for i in range(total_modes):
                # The first six modes are skipped (rigid-body translations/rotations).
                if i >5:
                    w_inv[i, i] = 1 / (float(eigen_values[i].split()[1].strip()))
        else:
            for i in modes:
                w_inv[i, i] = 1 / (float(eigen_values[i].split()[1].strip()))
    except TypeError:
        # Fall back to all (non-rigid-body) modes on a malformed mode list.
        print ('\n**************************************\nERROR!! INVALID INPUT FOR MODE RANGE: DEFAULT all Has been selected\n**************************************\n')
        w_inv = np.zeros((total_modes, total_modes))
        for i in range(total_modes):
            if i >5:
                w_inv[i, i] = 1 / (float(eigen_values[i].split()[1].strip()))
    # Read In U and VT full Matrix as U is the transpose of VT I only read in VT and create U from the VT matrix
    # info. So we can exclude U output from C++ script for faster analysis
    print ("Parsing VT_Matrix")
    try:
        fvt = open(vtMatrix, 'r')
        eigen_vectors = fvt.readlines()
        fvt.close()
    except IOError:
        # NOTE(review): same undefined protein_name issue as above.
        print ('\n**************************************\nERROR!! '+protein_name+' VT-MATRIX FILE: '+vtMatrix+' NOT FOUND:\n**************************************\n')
        sys.exit()
    print ("Calculating Transpose of VT")
    # Fill V^T row-wise and U (= V) column-wise from the same file pass.
    v_t = np.zeros((total_modes, total_modes))
    u = np.zeros((total_modes, total_modes))
    for i in range(total_modes):
        vectors = eigen_vectors[i].split()
        for j in range(total_modes):
            vector = float(vectors[j].strip())
            v_t[i, j] = vector
            u[j, i] = vector
    # Calculate Correlation Matrices
    print ("Calculating Covariance Matrix")
    w_v_t = np.dot(w_inv, v_t)
    c = np.dot(u, w_v_t)
    # Calculate Trace of the Correlation Matrices
    # Each residue contributes a 3x3 (x,y,z) block; keep only its trace.
    trace_c = np.zeros((total_modes / 3, total_modes / 3))
    for i in range(0, total_modes, 3):
        for j in range(0, total_modes, 3):
            trace = 0
            for k in range(3):
                trace = trace + c[i + k, j + k]
            trace_c[i / 3, j / 3] = trace
    print ("Covariance Matrix Calculation Complete")
    #Construct full image
    plotCorrelation(trace_c,"Covariance_Full.png",vmin,vmax,output)
    w=open(output+'/Covariance_Full.txt','w')
    for i in range(0, total_modes/3):
        for j in range(0, total_modes/3):
            w.write(str(trace_c[i,j])+" ")
        w.write('\n')
    w.close()
    try:
        #Construct asymmetric image
        # Gather the rows/columns belonging to each requested unit into a
        # dense sub-matrix. Assumes all units have the size of unit 1.
        numberUnits = len(specifiedUnits)
        sizePerUnit = len(definedUnits[1])
        trace_cf = np.zeros((numberUnits*sizePerUnit,numberUnits*sizePerUnit))
        cf_rows = 0
        cf_cols = 0
        for i in specifiedUnits:
            startIndex = (i-1)*sizePerUnit
            endIndex = i*sizePerUnit
            for k in range(startIndex,endIndex):
                cf_cols = 0
                for j in specifiedUnits:
                    startIndexC = (j-1)*sizePerUnit
                    endIndexC = j*sizePerUnit
                    for l in range (startIndexC,endIndexC):
                        trace_cf[cf_rows,cf_cols] = trace_c[k,l]
                        cf_cols+=1
                cf_rows+=1
        plotCorrelation(trace_cf,"Covariance_AUnits.png",vmin,vmax,output)
        w=open(output+'/Covariance_AUnits.txt','w')
        for i in range(numberUnits*sizePerUnit):
            for j in range(numberUnits*sizePerUnit):
                w.write(str(trace_cf[i,j])+" ")
            w.write('\n')
        w.close()
        if zoom!='0,0':
            # Zoom selector: find the contiguous run of atoms of the chosen
            # chain inside the chosen unit, then slice that square block.
            unit = int(zoom.split(',')[0])
            chain = zoom.split(',')[1]
            unitAtoms = definedUnits[unit]
            count = 0
            start = 0
            end = 0
            for atom in unitAtoms:
                count+=1
                ch= atom.split()[4]
                if ch == chain and start==0:
                    start = count
                elif ch!=chain and start!=0:
                    end = count
                    break
                elif atom == unitAtoms[-1]:
                    end = count
            # Convert the 1-based local positions to 0-based global indices.
            start = start-1+ (unit-1)*sizePerUnit
            end = end + (unit-1)*sizePerUnit
            trace_Z = trace_c[start:end,start:end]
            plotCorrelation(trace_Z,"Covariance_Zoom.png",vmin,vmax,output)
            w=open(output+'/Covariance_Zoom.txt','w')
            for i in range(end-start):
                for j in range(end-start):
                    w.write(str(trace_Z[i,j])+" ")
                w.write('\n')
            w.close()
    except IndexError:
        print ('\n**************************************\nERROR!! PDB FILE AND ANM MATRICES ARE IMCOMPATIBLE\nCHECK INPUT PARAMETERS FOR:\n1) INCORRECT PDB FILE \n2) INCORRECT MATRICES \n3) INCORRECT SPECIFIED ASYMMETRIC UNITS OR PDB CHAIN LABELS\n**************************************\n')
        sys.exit()
def defineAssemetricUnits(pdb, atom):
    """Partition a PDB file into asymmetric units (protomers).

    Walks the ATOM records, keeping only atoms of the requested type (plus
    CA atoms of glycine, which has no CB). A new protomer is started whenever
    a chain identifier repeats, i.e. when the sequence of chains wraps around.

    Parameters
    ----------
    pdb : str -- path to the PDB file
    atom : str -- atom name to select ('CA' or 'CB')

    Returns
    -------
    dict mapping the 1-based protomer number to its list of ATOM lines.
    """
    try:
        with open(pdb, 'r') as pdb_file:
            pdb_lines = pdb_file.readlines()
    except IOError:
        print ('\n**************************************\nERROR!! FILE ' +pdb + ' NOT FOUND:\n**************************************\n')
        sys.exit()

    protomers = {}
    current_lines = []   # ATOM lines of the protomer being assembled
    seen_chains = []     # chains already completed within this protomer
    atoms_selected = 0   # total selected atoms so far (used to seed current_chain)
    protomer_number = 1
    for line in pdb_lines:
        if not line.startswith("ATOM"):
            continue
        fields = line.split()
        residue = fields[3].strip()
        atom_name = fields[2]
        chain = fields[4]
        # Select the requested atom type; glycine contributes its CA instead.
        if atom_name != atom and not (atom_name == "CA" and residue == "GLY"):
            continue
        if atoms_selected == 0:
            current_chain = chain
        if chain not in seen_chains:
            current_lines.append(line)
            atoms_selected += 1
        if chain != current_chain:
            seen_chains.append(current_chain)
            current_chain = chain
            if chain in seen_chains:
                # Chain label repeats: the previous protomer is complete and
                # this atom opens the next one.
                protomers[protomer_number] = current_lines
                protomer_number += 1
                seen_chains = []
                current_lines = [line]
    protomers[protomer_number] = current_lines
    return protomers
def main(args):
    """Drive the covariance calculation from parsed command-line arguments.

    Validates the atom type, parses the mode selection and asymmetric-unit
    list (falling back to safe defaults on bad input), then builds the
    per-unit atom lists and computes/plots the covariance matrices.
    """
    pdbFile = args.pdb
    WM = args.wMatrix
    VTM = args.vtMatrix
    AUnits = args.aUnits
    zoomIndex = args.zoom
    vmin = args.vmin
    vmax = args.vmax
    output = args.outdir

    # Validate the atom selection case-insensitively.
    atomT = args.atomType.upper()
    if atomT!='CA' and atomT!='CB':
        print ('\n**************************************\nERROR!! Unrecognised atom type\nInput Options:\nCA: to select alpha carbon atoms\nCB: to select beta carbon atoms\n**************************************')
        sys.exit()

    # Parse the requested modes: 'all', a range 'M1:M2', or a comma list.
    # Command-line modes are 1-based; internal indices are 0-based.
    modes = args.modes
    mode_range = []
    try:
        if modes !='all':
            if ':' in modes:
                mRange = modes.split(':')
                mode_range = range(int(mRange[0])-1,int(mRange[1]))
            else:
                for m in modes.split(','):
                    if int(m) <= 0 :
                        print ('\n**************************************\nWARNING!! ZERO OR NEGATIVE MODES SPECIFIED. MATRIX INDEXES WILL BE AFFECTED\nRESULTS MAY NOT BE ACCURATE\n**************************************')
                    mode_range.append(int(m)-1)
        else:
            mode_range = 'all'
    except TypeError:
        print ('\n**************************************\nERROR!! INVALID INPUT FOR MODES: Default all has been selected\n**************************************')
        mode_range = 'all'

    # Parse the asymmetric-unit selection (default: unit 1 only).
    AUnitsL = []
    try:
        if AUnits == '1':
            AUnitsL.append(1)
        else:
            for a in AUnits.split(','):
                if int(a) <= 0 :
                    print ('\n**************************************\nWARNING!! ZERO OR NEGATIVE ASYMMETRIC UNITS. MATRIX INDEXES WILL BE AFFECTED\nRESULTS MAY NOT BE ACCURATE\n**************************************')
                AUnitsL.append(int(a))
    except TypeError:
        print ('\n**************************************\nERROR!! INVALID INPUT FOR ASYMMETRIC UNITS: Default 1 has been selected\n**************************************')
        # Bug fix: this fallback previously appended to the misspelled name
        # "AUnitL", raising NameError whenever it was reached.
        AUnitsL = [1]

    # Bug fix: pass the validated, upper-cased atom type so lower-case input
    # ('cb') still matches the upper-case atom names in the PDB file.
    definedAUnits = defineAssemetricUnits(pdbFile, atomT)
    calcCovariance(WM, VTM, mode_range, definedAUnits, AUnitsL, zoomIndex, vmin, vmax, output)
# Module-level logging switches.
silent = False
stream = sys.stdout

def log(message):
    """Forward message to print_err unless logging has been silenced."""
    global silent
    global stream
    if silent:
        return
    print_err(message)
if __name__ == "__main__":
    # parse cmd arguments
    parser = argparse.ArgumentParser()

    # standard arguments for logging
    parser.add_argument("--silent", help="Turn off logging", action='store_true', default=False)
    parser.add_argument("--welcome", help="Display welcome message (true/false)", default="true")
    parser.add_argument("--log-file", help="Output log file (default: standard output)", default=None)
    parser.add_argument("--outdir", help="Output directory", default="output")

    # custom arguments
    parser.add_argument("--pdb", help="Input PDB file that was analysed by ANM")
    parser.add_argument("--modes", help="1) Calculate the covariance matrix over all modes by using the option all:\n E.g --modes all\nOR\n2) Enter a select range of modes in format M1:M2\n E.g To calculate the covariance matrix over the first 20 non-zero modes enter --modes 7:27\nOR\n3) Calculate the covariance matrix for a combination of specific modes\nEnter mode numbers separated by a comma\n E.g: --modes 1,5,7", default="all")
    parser.add_argument("--wMatrix", help="Text file of Eigevalues of pdb, in format output from ANM.cpp")
    parser.add_argument("--vtMatrix", help="Text file of Eigevectors of pdb, in row (VT) format output from ANM.cpp")
    parser.add_argument("--aUnits", help="1) Specify a single unit.\n E.g --aUnits 1 \nOR\n 2) Provide a list of asymmetric units.\n E.g --aUnits 1,4,5", default='1')
    parser.add_argument("--zoom", help="1) Zoom into a single chain within a specified asymmetric unit.\nSpecify the unit and then the chain in a comma separted list single unit.\n E.g --zoom 1,2 will zoom into chain 2 in asymmetric unit 1", default='0,0')
    parser.add_argument("--atomType", help="Enter CA to select alpha carbons or CB to select beta carbons", default='X')
    parser.add_argument("--vmin", help="[float] minimum axes value", type=float, default=-0.1)
    parser.add_argument("--vmax", help="[float] maximum axes value", type=float, default=0.1)

    args = parser.parse_args()

    if args.welcome == "true":
        welcome_msg("Assembly Covariance", "Caroline Ross (caroross299@gmail.com)")

    # Check if args supplied by user
    if len(sys.argv) > 1:
        # Check if required directories exist
        if not os.path.isdir(args.outdir):
            os.makedirs(args.outdir)

        # set up logging (the log() helper reads these module globals)
        silent = args.silent
        if args.log_file:
            stream = open(args.log_file, 'w')

        start = datetime.now()
        log("Started at: %s" % str(start))

        # run script
        main(args)

        end = datetime.now()
        time_taken = format_seconds((end - start).seconds)

        log("Completed at: %s" % str(end))
        log("- Total time: %s" % str(time_taken))

        # close logging stream
        stream.close()
    else:
        print('No arguments provided. Use -h to view help')
| gpl-3.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/core/config.py | 13 | 22213 | """
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all option can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
  is set or reset. Changing the stored value is considered misuse, but
  is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associated
  with them, which are stored in auxiliary dictionaries keyed on the
  fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
"""
import re
from collections import namedtuple
from contextlib import contextmanager
import warnings
from pandas.compat import map, lmap, u
import pandas.compat as compat
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb')
_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
    """Exception for pandas.options, backwards compatible with KeyError
    checks.

    Inherits from both AttributeError and KeyError so that existing
    ``except KeyError`` handlers and attribute-style access keep working.
    """
#
# User API
def _get_single_key(pat, silent):
    """Resolve *pat* to exactly one registered option key.

    Raises OptionError when the pattern matches zero or more than one
    key.  Deprecation warnings are emitted unless *silent* is true, and
    a deprecated key is translated to its replacement.
    """
    matches = _select_options(pat)
    if not matches:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError('No such keys(s): %r' % pat)
    if len(matches) > 1:
        raise OptionError('Pattern matched multiple keys')

    resolved = matches[0]
    if not silent:
        _warn_if_deprecated(resolved)
    return _translate_key(resolved)
def _get_option(pat, silent=False):
    """Return the current value of the single option matching *pat*."""
    full_key = _get_single_key(pat, silent)
    # walk the nested config dict down to the leaf holding the value
    parent, leaf = _get_root(full_key)
    return parent[leaf]
def _set_option(*args, **kwargs):
    """Set one or more options to new values.

    Positional arguments are interpreted as (key, value) pairs, so an
    even, non-zero count is required.  The only accepted keyword argument
    is ``silent`` (suppresses deprecation warnings).  Each value is run
    through the option's validator (if any) and the option's callback
    (if any) is invoked after the store.
    """
    # must have at least 1 (key, value) pair; deal with constraints later
    nargs = len(args)
    if not nargs or nargs % 2 != 0:
        raise ValueError("Must provide an even number of non-keyword "
                         "arguments")

    # default to false
    silent = kwargs.pop('silent', False)

    if kwargs:
        raise TypeError('_set_option() got an unexpected keyword '
                        'argument "{0}"'.format(list(kwargs.keys())[0]))

    for k, v in zip(args[::2], args[1::2]):
        key = _get_single_key(k, silent)

        o = _get_registered_option(key)
        if o and o.validator:
            o.validator(v)

        # walk the nested dict
        root, k = _get_root(key)
        root[k] = v

        # CONSISTENCY FIX: guard `o` before dereferencing, matching the
        # validator check above (avoids AttributeError if o is None).
        if o and o.cb:
            o.cb(key)
def _describe_option(pat='', _print_desc=True):
    """Print (or return, if *_print_desc* is false) the descriptions of
    every registered option matching *pat*."""
    keys = _select_options(pat)
    if len(keys) == 0:
        raise OptionError('No such keys(s)')

    # build one description per matching key and glue them together
    descriptions = [_build_option_description(k) for k in keys]
    text = u('').join(descriptions)

    if not _print_desc:
        return text
    print(text)
def _reset_option(pat, silent=False):
    """Reset the option(s) matching `pat` to their default values.

    A pattern shorter than 4 characters that matches multiple keys is
    rejected, to protect against accidentally resetting large parts of
    the config; the reserved keyword "all" resets everything.
    """
    keys = _select_options(pat)

    if len(keys) == 0:
        raise OptionError('No such keys(s)')

    if len(keys) > 1 and len(pat) < 4 and pat != 'all':
        raise ValueError('You must specify at least 4 characters when '
                         'resetting multiple keys, use the special keyword '
                         '"all" to reset all the options to their default '
                         'value')

    for k in keys:
        _set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
    """Return the registered default value for the option matching *pat*."""
    return _get_registered_option(_get_single_key(pat, silent=True)).defval
class DictWrapper(object):
    """ provide attribute-style access to a nested dict

    Backs ``pd.options``: attribute reads walk the nested config dict
    (returning another DictWrapper for intermediate namespaces) and
    attribute writes delegate to _set_option().
    """

    def __init__(self, d, prefix=""):
        # object.__setattr__ bypasses our own __setattr__ below, which
        # would otherwise reject these assignments.
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)

    def __setattr__(self, key, val):
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        # you can't set new keys
        # and you can't overwrite subtrees
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")

    def __getattr__(self, key):
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        v = object.__getattribute__(self, "d")[key]
        if isinstance(v, dict):
            # intermediate namespace: wrap the sub-dict so chained
            # attribute access (options.x.y.z) keeps working
            return DictWrapper(v, prefix)
        else:
            return _get_option(prefix)

    def __dir__(self):
        return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
    """Callable wrapper whose __doc__ is rendered on demand.

    Wraps one of the internal option functions and substitutes the
    current option listing/descriptions into *doc_tmpl* every time
    ``__doc__`` is read, so help() output always reflects the options
    registered at that moment.
    """

    def __init__(self, func, doc_tmpl):
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func

    def __call__(self, *args, **kwds):
        return self.__func__(*args, **kwds)

    @property
    def __doc__(self):
        opts_desc = _describe_option('all', _print_desc=False)
        opts_list = pp_options_list(list(_registered_options.keys()))
        return self.__doc_tmpl__.format(opts_desc=opts_desc,
                                        opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)

# attribute-style access to the whole option tree (exposed as pd.options)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
    """
    Context manager to temporarily set options in the `with` statement context.

    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.

    Examples
    --------
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...
    """

    def __init__(self, *args):
        if not (len(args) % 2 == 0 and len(args) >= 2):
            # BUG FIX: the original message was built from two adjacent
            # string literals with no separating space ("...invoke
            # asoption_context(...") and closed the list with ")" instead
            # of "]".
            raise ValueError(
                'Need to invoke as '
                'option_context(pat, val, [(pat, val), ...]).'
            )

        self.ops = list(zip(args[::2], args[1::2]))

    def __enter__(self):
        # remember current values so __exit__ can restore them
        undo = []
        for pat, val in self.ops:
            undo.append((pat, _get_option(pat, silent=True)))

        self.undo = undo

        for pat, val in self.ops:
            _set_option(pat, val, silent=True)

    def __exit__(self, *args):
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
    """Register an option in the package-wide pandas config object

    Parameters
    ----------
    key - a fully-qualified key, e.g. "x.y.option - z".
    defval - the default value of the option
    doc - a string description of the option
    validator - a function of a single argument, should raise `ValueError` if
                called with a value which is not a legal value for the option.
    cb - a function of a single argument "key", which is called
         immediately after an option value is set/reset. key is
         the full name of the option.

    Returns
    -------
    Nothing.

    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.

    """
    import tokenize
    import keyword
    key = key.lower()

    if key in _registered_options:
        raise OptionError("Option '%s' has already been registered" % key)
    if key in _reserved_keys:
        raise OptionError("Option '%s' is a reserved key" % key)

    # the default value should be legal
    if validator:
        validator(defval)

    # walk the nested dict, creating dicts as needed along the path
    path = key.split('.')

    for k in path:
        # every path component must be a valid, non-keyword Python
        # identifier so attribute-style access (pd.options.x.y) works
        if not bool(re.match('^' + tokenize.Name + '$', k)):
            raise ValueError("%s is not a valid identifier" % k)
        if keyword.iskeyword(k):
            raise ValueError("%s is a python keyword" % k)

    cursor = _global_config
    for i, p in enumerate(path[:-1]):
        if not isinstance(cursor, dict):
            raise OptionError("Path prefix to option '%s' is already an option"
                              % '.'.join(path[:i]))
        if p not in cursor:
            cursor[p] = {}
        cursor = cursor[p]

    if not isinstance(cursor, dict):
        raise OptionError("Path prefix to option '%s' is already an option"
                          % '.'.join(path[:-1]))

    cursor[path[-1]] = defval  # initialize

    # save the option metadata
    _registered_options[key] = RegisteredOption(key=key, defval=defval,
                                                doc=doc, validator=validator,
                                                cb=cb)
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
    """
    Mark option `key` as deprecated, if code attempts to access this option,
    a warning will be produced, using `msg` if given, or a default message
    if not.
    if `rkey` is given, any access to the key will be re-routed to `rkey`.

    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequence access will fail as usual, after the
    deprecation warning is given.

    Parameters
    ----------
    key - the name of the option to be deprecated. must be a fully-qualified
          option name (e.g "x.y.z.rkey").

    msg - (Optional) a warning message to output when the key is referenced.
          if no message is given a default message will be emitted.

    rkey - (Optional) the name of an option to reroute access to.
           If specified, any referenced `key` will be re-routed to `rkey`
           including set/get/reset.
           rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
           used by the default message if no `msg` is specified.

    removal_ver - (Optional) specifies the version in which this option will
                  be removed. used by the default message if no `msg`
                  is specified.

    Returns
    -------
    Nothing

    Raises
    ------
    OptionError - if key has already been deprecated.

    """
    key = key.lower()

    if key in _deprecated_options:
        raise OptionError("Option '%s' has already been defined as deprecated."
                          % key)

    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
    """Return the registered option keys matching *pat*.

    An exact key match short-circuits; the reserved key ``"all"`` returns
    every registered key; otherwise *pat* is treated as a
    case-insensitive regular expression.
    """
    # short-circuit for exact key
    if pat in _registered_options:
        return [pat]

    candidates = sorted(_registered_options.keys())
    if pat == 'all':  # reserved key
        return candidates

    regex = re.compile(pat, re.I)
    return [key for key in candidates if regex.search(key)]
def _get_root(key):
    """Walk the nested config dict for *key*.

    Returns the innermost dict together with the leaf component of the
    dotted key, so callers can read or write ``node[leaf]``.
    """
    parts = key.split('.')
    node = _global_config
    for name in parts[:-1]:
        node = node[name]
    return node, parts[-1]
def _is_deprecated(key):
    """ Returns True if the given option has been deprecated """
    return key.lower() in _deprecated_options
def _get_deprecated_option(key):
    """
    Retrieves the metadata for a deprecated option, if `key` is deprecated.

    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    # IDIOM: dict.get already returns None for a missing key, replacing
    # the try/except KeyError dance.
    return _deprecated_options.get(key)
def _get_registered_option(key):
    """
    Retrieves the option metadata if `key` is a registered option.

    Returns
    -------
    RegisteredOption (namedtuple) if key is a registered option,
    None otherwise
    """
    return _registered_options.get(key)
def _translate_key(key):
    """Map a deprecated *key* to its replacement, if one is defined;
    otherwise return *key* unchanged."""
    meta = _get_deprecated_option(key)
    if meta is None:
        return key
    return meta.rkey or key
def _warn_if_deprecated(key):
    """
    Checks if `key` is a deprecated option and if so, prints a warning.

    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    d = _get_deprecated_option(key)
    if d:
        if d.msg:
            # a custom message is both printed and issued as a warning
            print(d.msg)
            warnings.warn(d.msg, DeprecationWarning)
        else:
            # build the default message from the deprecation metadata
            msg = "'%s' is deprecated" % key
            if d.removal_ver:
                msg += ' and will be removed in %s' % d.removal_ver
            if d.rkey:
                msg += ", please use '%s' instead." % d.rkey
            else:
                msg += ', please refrain from using it.'

            warnings.warn(msg, DeprecationWarning)
        return True
    return False
def _build_option_description(k):
    """ Builds a formatted description of a registered option and prints it """

    o = _get_registered_option(k)
    d = _get_deprecated_option(k)

    s = u('%s ') % k

    # BUG FIX: `o` is None for keys that are deprecated but no longer
    # registered (the `if o:` guard below shows this is expected); the
    # original dereferenced `o.doc` unconditionally and raised
    # AttributeError for such keys.
    if o is not None and o.doc:
        s += '\n'.join(o.doc.strip().split('\n'))
    else:
        s += 'No description available.'

    if o:
        s += u('\n [default: %s] [currently: %s]') % (o.defval,
                                                      _get_option(k, True))
    if d:
        s += u('\n (Deprecated')
        s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
        s += u(')')

    s += '\n\n'
    return s
def pp_options_list(keys, width=80, _print=False):
    """ Builds a concise listing of available options, grouped by prefix """
    from textwrap import wrap
    from itertools import groupby

    def fmt_group(prefix, names):
        # Render one "- prefix.[a, b, c]" entry, wrapped to *width*.
        lead = '- ' + prefix + '.[' if prefix else ''
        wrapped = wrap(', '.join(names), width, initial_indent=lead,
                       subsequent_indent=' ', break_long_words=False)
        if wrapped and wrapped[-1] and prefix:
            wrapped[-1] += ']'
        return wrapped

    lines = []

    # un-dotted keys come first, as a plain comma-separated run
    simple = sorted(key for key in keys if '.' not in key)
    if simple:
        lines.extend(fmt_group('', simple))

    # dotted keys are grouped by everything before the last dot
    dotted = sorted(key for key in keys if '.' in key)
    for prefix, group in groupby(dotted, lambda key: key[:key.rfind('.')]):
        suffixes = [key[len(prefix) + 1:] for key in group]
        lines.extend(fmt_group(prefix, suffixes))

    joined = '\n'.join(lines)
    if _print:
        print(joined)
    else:
        return joined
#
# helpers
@contextmanager
def config_prefix(prefix):
    """contextmanager for multiple invocations of API with a common prefix

    supported API functions: (register / get / set )__option

    Warning: This is not thread - safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.

    Example:

    import pandas.core.config as cf
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red")
        cf.register_option("size", " 5 pt")
        cf.set_option(size, " 6 pt")
        cf.get_option(size)
        ...

    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
    """
    # Note: reset_option relies on set_option, and on key directly
    # it does not fit in to this monkey-patching scheme

    global register_option, get_option, set_option, reset_option

    def wrap(func):
        # prepend "<prefix>." to the key before delegating to the real API
        def inner(key, *args, **kwds):
            pkey = '%s.%s' % (prefix, key)
            return func(pkey, *args, **kwds)

        return inner

    _register_option = register_option
    _get_option = get_option
    _set_option = set_option
    set_option = wrap(set_option)
    get_option = wrap(get_option)
    register_option = wrap(register_option)
    # ROBUSTNESS FIX: restore the real functions even when the with-block
    # raises; otherwise the module-level API stays monkey-patched forever.
    try:
        yield None
    finally:
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
    """Build a validator requiring ``type(x)`` to equal *_type* exactly.

    Returns
    -------
    validator - a one-argument function that returns None when its
                argument's type equals *_type* and raises ``ValueError``
                otherwise (subclasses do not pass).
    """

    def inner(x):
        # guard-clause form: accept and bail out early, raise otherwise
        if type(x) == _type:
            return
        raise ValueError("Value must have type '%s'" % str(_type))

    return inner
def is_instance_factory(_type):
    """Build a validator requiring ``isinstance(x, _type)``.

    *_type* may be a single type or a tuple/list of acceptable types;
    the raised ``ValueError`` names the expected type(s).
    """
    if isinstance(_type, (tuple, list)):
        _type = tuple(_type)
        from pandas.core.common import pprint_thing
        type_repr = "|".join(map(pprint_thing, _type))
    else:
        type_repr = "'%s'" % _type

    def inner(x):
        # guard-clause form of the original isinstance check
        if isinstance(x, _type):
            return
        raise ValueError("Value must be an instance of %s" % type_repr)

    return inner
def is_one_of_factory(legal_values):
    """Build a validator requiring ``x`` to be one of *legal_values*.

    The raised ``ValueError`` lists the allowed values.
    """

    def inner(x):
        from pandas.core.common import pprint_thing as pp
        # IDIOM: `x not in` instead of the non-idiomatic `not x in`
        if x not in legal_values:
            pp_values = lmap(pp, legal_values)
            raise ValueError("Value must be one of %s"
                             % pp("|".join(pp_values)))

    return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_unicode = is_type_factory(compat.text_type)
# unlike the exact type checks above, is_text accepts subclasses too
# (isinstance-based check against str/bytes)
is_text = is_instance_factory((str, bytes))
| apache-2.0 |
jhanley634/testing-tools | problem/bench/traffic/top_wikipedia_pages.py | 1 | 1926 | #! /usr/bin/env python
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from collections import namedtuple
from operator import attrgetter
import pprint
import pandas as pd
import requests
Pageviews = namedtuple('pageviews', 'article rank views'.split())
def csv_from_json(date='2020/01/22', out_fspec='/tmp/pv.csv', verbose=False):
    """Fetch Wikipedia's top pageviews for *date* and write them to a CSV.

    Parameters
    ----------
    date : str
        'YYYY/MM/DD' path fragment for the Wikimedia REST API.
    out_fspec : str
        Destination CSV path.
    verbose : bool
        If true, pretty-print the articles sorted by view count.
    """
    base = ('https://wikimedia.org/api/rest_v1/metrics'
            '/pageviews/top/en.wikipedia.org/all-access')
    url = f'{base}/{date}'
    print(url)
    # ROBUSTNESS: requests has no default timeout, so a stalled connection
    # would otherwise hang this call forever.
    req = requests.get(url, timeout=30)
    req.raise_for_status()
    articles = req.json()['items'][0]['articles']
    articles = [Pageviews(**article)
                for article in articles]
    if verbose:
        pprint.pprint(sorted(articles, key=attrgetter('views')))
    df = pd.DataFrame(articles)
    print(df)
    df.to_csv(out_fspec, index=False)
if __name__ == '__main__':
    # script entry point: fetch the default date and write /tmp/pv.csv
    csv_from_json()
| mit |
ueshin/apache-spark | python/pyspark/pandas/tests/test_rolling.py | 15 | 6920 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
from pyspark.pandas.window import Rolling
class RollingTest(PandasOnSparkTestCase, TestUtils):
    """Parity tests for pandas-on-Spark rolling windows.

    Each helper computes the same rolling aggregation with native pandas
    and with pyspark.pandas and asserts the results are equal via
    ``assert_eq``.
    """

    def test_rolling_error(self):
        # invalid constructor arguments must be rejected eagerly
        with self.assertRaisesRegex(ValueError, "window must be >= 0"):
            ps.range(10).rolling(window=-1)
        with self.assertRaisesRegex(ValueError, "min_periods must be >= 0"):
            ps.range(10).rolling(window=1, min_periods=-1)

        with self.assertRaisesRegex(
            TypeError, "psdf_or_psser must be a series or dataframe; however, got:.*int"
        ):
            Rolling(1, 2)

    def _test_rolling_func(self, f):
        # parity for Series, MultiIndex Series, DataFrame and
        # MultiIndex-column DataFrame inputs, for aggregation *f*
        pser = pd.Series([1, 2, 3], index=np.random.rand(3), name="a")
        psser = ps.from_pandas(pser)
        self.assert_eq(getattr(psser.rolling(2), f)(), getattr(pser.rolling(2), f)())
        self.assert_eq(getattr(psser.rolling(2), f)().sum(), getattr(pser.rolling(2), f)().sum())

        # Multiindex
        pser = pd.Series(
            [1, 2, 3],
            index=pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")]),
            name="a",
        )
        psser = ps.from_pandas(pser)
        self.assert_eq(getattr(psser.rolling(2), f)(), getattr(pser.rolling(2), f)())

        pdf = pd.DataFrame(
            {"a": [1.0, 2.0, 3.0, 2.0], "b": [4.0, 2.0, 3.0, 1.0]}, index=np.random.rand(4)
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(getattr(psdf.rolling(2), f)(), getattr(pdf.rolling(2), f)())
        self.assert_eq(getattr(psdf.rolling(2), f)().sum(), getattr(pdf.rolling(2), f)().sum())

        # Multiindex column
        columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
        pdf.columns = columns
        psdf.columns = columns
        self.assert_eq(getattr(psdf.rolling(2), f)(), getattr(pdf.rolling(2), f)())

    def test_rolling_min(self):
        self._test_rolling_func("min")

    def test_rolling_max(self):
        self._test_rolling_func("max")

    def test_rolling_mean(self):
        self._test_rolling_func("mean")

    def test_rolling_sum(self):
        self._test_rolling_func("sum")

    def test_rolling_count(self):
        self._test_rolling_func("count")

    def test_rolling_std(self):
        self._test_rolling_func("std")

    def test_rolling_var(self):
        self._test_rolling_func("var")

    def _test_groupby_rolling_func(self, f):
        # parity for grouped rolling aggregations, covering Series,
        # MultiIndex Series, DataFrame groupby variants and
        # MultiIndex-column groupings
        pser = pd.Series([1, 2, 3, 2], index=np.random.rand(4), name="a")
        psser = ps.from_pandas(pser)
        self.assert_eq(
            getattr(psser.groupby(psser).rolling(2), f)().sort_index(),
            getattr(pser.groupby(pser).rolling(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psser.groupby(psser).rolling(2), f)().sum(),
            getattr(pser.groupby(pser).rolling(2), f)().sum(),
        )

        # Multiindex
        pser = pd.Series(
            [1, 2, 3, 2],
            index=pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z"), ("c", "z")]),
            name="a",
        )
        psser = ps.from_pandas(pser)
        self.assert_eq(
            getattr(psser.groupby(psser).rolling(2), f)().sort_index(),
            getattr(pser.groupby(pser).rolling(2), f)().sort_index(),
        )

        pdf = pd.DataFrame({"a": [1.0, 2.0, 3.0, 2.0], "b": [4.0, 2.0, 3.0, 1.0]})
        psdf = ps.from_pandas(pdf)
        self.assert_eq(
            getattr(psdf.groupby(psdf.a).rolling(2), f)().sort_index(),
            getattr(pdf.groupby(pdf.a).rolling(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.groupby(psdf.a).rolling(2), f)().sum(),
            getattr(pdf.groupby(pdf.a).rolling(2), f)().sum(),
        )
        self.assert_eq(
            getattr(psdf.groupby(psdf.a + 1).rolling(2), f)().sort_index(),
            getattr(pdf.groupby(pdf.a + 1).rolling(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.b.groupby(psdf.a).rolling(2), f)().sort_index(),
            getattr(pdf.b.groupby(pdf.a).rolling(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.groupby(psdf.a)["b"].rolling(2), f)().sort_index(),
            getattr(pdf.groupby(pdf.a)["b"].rolling(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.groupby(psdf.a)[["b"]].rolling(2), f)().sort_index(),
            getattr(pdf.groupby(pdf.a)[["b"]].rolling(2), f)().sort_index(),
        )

        # Multiindex column
        columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
        pdf.columns = columns
        psdf.columns = columns
        self.assert_eq(
            getattr(psdf.groupby(("a", "x")).rolling(2), f)().sort_index(),
            getattr(pdf.groupby(("a", "x")).rolling(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.groupby([("a", "x"), ("a", "y")]).rolling(2), f)().sort_index(),
            getattr(pdf.groupby([("a", "x"), ("a", "y")]).rolling(2), f)().sort_index(),
        )

    def test_groupby_rolling_count(self):
        self._test_groupby_rolling_func("count")

    def test_groupby_rolling_min(self):
        self._test_groupby_rolling_func("min")

    def test_groupby_rolling_max(self):
        self._test_groupby_rolling_func("max")

    def test_groupby_rolling_mean(self):
        self._test_groupby_rolling_func("mean")

    def test_groupby_rolling_sum(self):
        self._test_groupby_rolling_func("sum")

    def test_groupby_rolling_std(self):
        # TODO: `std` now raise error in pandas 1.0.0
        self._test_groupby_rolling_func("std")

    def test_groupby_rolling_var(self):
        self._test_groupby_rolling_func("var")
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_rolling import *  # noqa: F401

    try:
        # prefer JUnit-style XML reports when xmlrunner is available (CI)
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
hunterowens/data-pipelines | dc_pipeline.py | 1 | 3588 | import pandas as pd
import requests
import folium
import luigi
YEARS = {2012: 'http://opendata.dc.gov/datasets/5f4ea2f25c9a45b29e15e53072126739_7.csv',
2013: 'http://opendata.dc.gov/datasets/4911fcf3527246ae9bf81b5553a48c4d_6.csv',
2014: 'http://opendata.dc.gov/datasets/d4891ca6951947538f6707a6b07ae225_5.csv',
2015: 'http://opendata.dc.gov/datasets/981c105beef74af38cc4090992661264_25.csv',
2016: 'http://opendata.dc.gov/datasets/5d14ae7dcd1544878c54e61edda489c3_24.csv'}
class DownloadTask(luigi.ExternalTask):
    """
    downloads data from the portal.
    """
    # NOTE(review): luigi.ExternalTask normally represents data produced
    # outside the pipeline and does not define run(); confirm whether this
    # should be a plain luigi.Task that actually downloads YEARS[year].
    year = luigi.IntParameter(default=2012)

    def run(self):
        pass

    def output(self):
        # one CSV per year under ./data/
        return luigi.LocalTarget('./data/permits-dc-%s.csv' % str(self.year))
class cleanCSV(luigi.Task):
    """
    cleans a CSV into the format we'd like for analysis.
    you'll want to grab the Ward, Fees, Permit, and Geospacial fees.
    """
    # BUG FIX: requires() referenced ``self.param``, which was never
    # declared.  mergeDatasets instantiates ``cleanCSV(year)``, so declare
    # a matching luigi parameter and pass it through to DownloadTask.
    year = luigi.IntParameter(default=2012)

    def requires(self):
        return DownloadTask(self.year)

    def run(self):
        # TODO: not yet implemented
        pass

    def output(self):
        pass
class mergeDatasets(luigi.Task):
    """
    merges the datasets
    """

    def requires(self):
        # one cleanCSV task per year; range(2012, 2017) covers 2012-2016,
        # matching the keys of YEARS above
        return [cleanCSV(year) for year in range(2012, 2017)]

    def run(self):
        # TODO: not yet implemented
        pass

    def output(self):
        pass
class importIntoPandasDF(luigi.Task):
    """
    converts the CSVs into pandas dataframes, saves as pickle file.
    """

    def requires(self):
        # BUG FIX: requires() must return task *instances*; returning the
        # bare class (``mergeDatasets``) was inconsistent with the other
        # tasks in this file and breaks luigi's dependency resolution.
        return mergeDatasets()

    def run(self):
        # TODO: not yet implemented
        pass

    def output(self):
        pass
class computeWards(luigi.Task):
    """
    compute the development by ward per year and save.
    Basically, you'll want to value_counts() on the ward field.
    How many permits per ward were issued
    """

    def requires(self):
        return importIntoPandasDF()

    def run(self):
        # TODO: not yet implemented
        pass

    def output(self):
        pass
class makeMap(luigi.Task):
    """
    make a map of development
    """

    def requires(self):
        return importIntoPandasDF()

    def run(self):
        """
        We're gonna use Folium to make a map.
        I'm giving you some basic code here to get a map object and show you
        how to plot a marker.
        """
        # IDIOM FIX: renamed local from ``map`` -- shadowing the builtin
        # is an easy source of bugs later in the method.
        dc_map = folium.Map(location=[38.9072, -77.0369],
                            zoom_start=12)
        # add a marker
        # NOTE(review): these coordinates are Mt. Hood (Oregon), far
        # outside the DC viewport -- sample code; replace with DC data.
        folium.Marker([45.3288, -121.6625], popup='Mt. Hood Meadows').add_to(dc_map)

    def output(self):
        pass
class makePredictions(luigi.Task):
    """
    Make predictions for given next years number of ward development,
    Given the ward sums, predict the values using a simple regression.
    NB: This is not good modeling, but I'm trying to demostrate plumbing here.
    """

    def requires(self):
        return computeWards()

    def run(self):
        ## Here's the basic skeleton of how do do a linear regression
        # NOTE(review): skeleton code -- ``np`` and ``df`` are not defined
        # anywhere in this file (numpy is never imported), sklearn expects
        # X to be 2-D (e.g. X.reshape(-1, 1)), and predict() takes an
        # array-like rather than a bare scalar.  This must be fleshed out
        # before it can run.
        from sklearn.linear_model import LinearRegression
        data = np.asarray(df)
        lr = LinearRegression()
        X, y = data[:, 1], data[:, 0]  ## THIS DEPENDS ON HOW YOU SHAPE YOUR DATA - X should be years, y should be counts
        lr.fit(X, y)
        lr.predict(2017)
        pass

    def output(self):
        pass
class makeReport(luigi.Task):
    """
    Gathers info and saves to report.
    """

    def requires(self):
        # Depends on both upstream tasks; luigi accepts a tuple of tasks.
        return makePredictions(), makeMap()

    def run(self):
        pass

    def output(self):
        pass
if __name__ == '__main__':
    # Kick off the pipeline with the local (in-process) scheduler.
    luigi.run(['DownloadTask', '--local-scheduler'])
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """Check the Hungarian algorithm on square and rectangular matrices.

    Each cost matrix is solved both as-is and transposed; the total cost
    of the returned assignment must equal the known optimum in both cases.
    """
    # The private sklearn.utils.linear_assignment_._hungarian helper (the
    # file's own XXX comment flags it) was deprecated and later removed
    # from scikit-learn; scipy.optimize.linear_sum_assignment is the
    # public, supported replacement.  It returns (row_ind, col_ind) index
    # arrays rather than a list of pairs.
    from scipy.optimize import linear_sum_assignment

    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),

        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]

    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        total_cost = cost_matrix[row_ind, col_ind].sum()
        assert expected_total == total_cost

        # The optimal total cost is invariant under transposition.
        row_ind, col_ind = linear_sum_assignment(cost_matrix.T)
        total_cost = cost_matrix.T[row_ind, col_ind].sum()
        assert expected_total == total_cost
| mit |
AISystena/web_crawler | lib/image_cnn_gpu/ImageTrainer.py | 1 | 6005 | # coding: utf-8
import six
import sys
import os.path
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
import chainer
import chainer.links as L
from chainer import optimizers, cuda
import matplotlib.pyplot as plt
import Util
from ImageCnn import ImageCnn
plt.style.use('ggplot')
"""
CNNによるテキスト分類 (posi-nega)
- 5層のディープニューラルネット
- 単語ベクトルにはWordEmbeddingモデルを使用
"""
class ImageTrainer:
    """Train an image CNN with chainer, persist the model, and plot metrics."""

    def __init__(self, gpu=0, epoch=50, batchsize=5):
        # Path of the pickled model, relative to this source file.
        current_dir_path = os.path.dirname(__file__)
        self.model_pkl = current_dir_path + '/model/image_cnn.pkl'
        self.gpu = gpu
        self.batchsize = batchsize  # minibatch size
        self.n_epoch = epoch  # number of epochs (parameter-update passes)
        self.weight_decay = 0.01
        self.lr = 0.001
        # number of hidden-layer units
        self.mid_units = 2560
        self.output_channel = 1280
        self.filters = 32
        self.n_label = 5
        self.input_channel = 3

    def dump_model(self, model):
        '''
        Save the model to disk as a pickle (highest protocol).
        '''
        with open(self.model_pkl, 'wb') as pkl:
            pickle.dump(model, pkl, -1)

    def load_model(self):
        '''
        Load a previously pickled model; returns None if no pickle exists.
        '''
        model = None
        if os.path.exists(self.model_pkl):
            with open(self.model_pkl, 'rb') as pkl:
                model = pickle.load(pkl)
        return model

    def makeGpuAvailable(self, model):
        # Decide whether to use the GPU.
        if self.gpu >= 0:
            pass  # no-op kept from original
            cuda.check_cuda_available()
            cuda.get_device(self.gpu).use()
            model.to_gpu()
        xp = np if self.gpu < 0 else cuda.cupy  # self.gpu < 0: use cpu
        return xp

    def train(self):
        # Prepare dataset
        dataset = Util.load_data()
        dataset['source'] = dataset['source'].astype(np.float32)  # features
        dataset['target'] = dataset['target'].astype(np.int32)  # labels
        x_train, x_test, y_train, y_test = train_test_split(dataset['source'],
                                                            dataset['target'], test_size=0.15)
        N_test = y_test.size  # test data size
        N = len(x_train)  # train data size
        print('input_channel is {}'.format(self.input_channel))
        print('output_channel is {}'.format(self.output_channel))
        print('filter_height is {}'.format(self.filters))
        print('n_label is {}'.format(self.n_label))

        # Model definition: reuse a pickled model when present, otherwise
        # build a fresh classifier around the CNN.
        model = self.load_model()
        if model is None:
            model = L.Classifier(ImageCnn(self.input_channel,
                                 self.output_channel, self.filters, self.mid_units, self.n_label))
        xp = self.makeGpuAvailable(model)

        # Setup optimizer
        optimizer = optimizers.AdaGrad()
        optimizer.setup(model)
        optimizer.lr = self.lr
        optimizer.add_hook(chainer.optimizer.WeightDecay(self.weight_decay))

        train_loss = []
        train_acc = []
        test_loss = []
        test_acc = []

        # Learning loop
        for epoch in six.moves.range(1, self.n_epoch + 1):
            print('epoch', epoch, '/', self.n_epoch)

            # training: visit the training set in a fresh random order.
            perm = np.random.permutation(N)  # random permutation of indices
            sum_train_loss = 0.0
            sum_train_accuracy = 0.0
            for i in six.moves.range(0, N, self.batchsize):
                # Use perm to pick a different minibatch from x_train /
                # y_train on every pass.
                x = chainer.Variable(xp.asarray(x_train[perm[i:i + self.batchsize]]))  # source
                t = chainer.Variable(xp.asarray(y_train[perm[i:i + self.batchsize]]))  # target
                optimizer.update(model, x, t)
                sum_train_loss += float(model.loss.data) * len(t.data)  # for mean loss
                sum_train_accuracy += float(model.accuracy.data) * len(t.data)  # for mean accuracy
            train_loss.append(sum_train_loss / N)
            train_acc.append(sum_train_accuracy / N)
            print('train mean loss={}, accuracy={}'
                  .format(sum_train_loss / N, sum_train_accuracy / N))

            # evaluation
            sum_test_loss = 0.0
            sum_test_accuracy = 0.0
            for i in six.moves.range(0, N_test, self.batchsize):
                # all test data
                x = chainer.Variable(xp.asarray(x_test[i:i + self.batchsize]))
                t = chainer.Variable(xp.asarray(y_test[i:i + self.batchsize]))
                loss = model(x, t)
                sum_test_loss += float(loss.data) * len(t.data)
                sum_test_accuracy += float(model.accuracy.data) * len(t.data)
            test_loss.append(sum_test_loss / N_test)
            test_acc.append(sum_test_accuracy / N_test)
            print(' test mean loss={}, accuracy={}'.format(
                sum_test_loss / N_test, sum_test_accuracy / N_test))
            #if epoch > 10:
            #    optimizer.lr *= 0.97
            print('learning rate:{} weight decay:{}'.format(optimizer.lr, self.weight_decay))
            sys.stdout.flush()

        # save the model
        self.dump_model(model)

        # plot accuracy and loss side by side
        plt.figure(figsize=(16, 6))
        acc_plt = plt.subplot2grid((1, 2), (0, 0))
        acc_plt.plot(range(len(train_acc)), train_acc)
        acc_plt.plot(range(len(test_acc)), test_acc)
        acc_plt.legend(["train_acc", "test_acc"], loc=4)
        acc_plt.set_title("Accuracy of digit recognition.")
        loss_plt = plt.subplot2grid((1, 2), (0, 1))
        loss_plt.plot(range(len(train_loss)), train_loss)
        loss_plt.plot(range(len(test_loss)), test_loss)
        loss_plt.legend(["train_loss", "test_loss"], loc=4)
        loss_plt.set_title("Loss of digit recognition.")
        plt.plot()
        plt.show()
| mit |
davidam/python-examples | scikit/datasets/plot_out_of_core_classification.py | 3 | 13644 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves.urllib.request import urlretrieve
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
    # Hack to detect whether we are running by the sphinx builder:
    # sphinx execs example code without a ``__file__`` global.
    return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""

    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        # Dispatch <TAG ...> to start_<tag>() when such a handler exists.
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)

    def handle_endtag(self, tag):
        # Dispatch </TAG> to end_<tag>() when such a handler exists.
        method = 'end_' + tag
        getattr(self, method, lambda: None)()

    def _reset(self):
        # Clear per-document parser state before/after each document.
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""

    def parse(self, fd):
        # Feed the file chunk by chunk, yielding documents as soon as
        # end_reuters() completes them.
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()

    def handle_data(self, data):
        # Accumulate character data into whichever section we are inside.
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        # Collapse runs of whitespace and emit the finished document.
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        # A <D>...</D> inside <TOPICS> is one topic label.
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """

    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'

    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        """Download the dataset."""
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: overwrite one progress line in place.
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urlretrieve(DOWNLOAD_URL, filename=archive_path,
                    reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        tarfile.open(archive_path, 'r:gz').extractall(data_path)
        print("done.")

    # Stream every SGML shard through a fresh parser.
    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        for doc in parser.parse(open(filename, 'rb')):
            yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
alternate_sign=False)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    texts = []
    labels = []
    for doc in itertools.islice(doc_iter, size):
        if not doc['topics']:
            continue  # drop documents that carry no topic labels
        texts.append(u'{title}\n\n{body}'.format(**doc))
        labels.append(pos_class in doc['topics'])
    if not texts:
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    return tuple(texts), np.asarray(labels, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        if not len(X_text):
            break  # the underlying stream is exhausted
        yield X_text, y
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    # NOTE(review): reads the module-level ``test_stats`` dict -- it must be
    # populated (test set held out) before this is called.
    duration = time.time() - stats['t0']
    s = "%20s classifier : \t" % cls_name
    s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
    s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
    s += "accuracy: %(accuracy).3f " % stats
    s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
    return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x.

    x_legend is the axis label describing what x measures
    (e.g. "training examples (#)" or "runtime (s)").
    """
    x = np.array(x)
    y = np.array(y)
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Attach the bar height as a text label above each rectangle."""
    # NOTE: writes onto the module-level ``ax`` axes object.
    for rect in rectangles:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.,
                1.05 * height, '%.4f' % height,
                ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| gpl-3.0 |
MartinDelzant/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
# Bug fix: np.random.normal requires an integer ``size``; 0.3 * N is a
# float, which is an error on modern NumPy -- cast explicitly.
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]

bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)

# histogram 1
# Bug fix: the ``normed`` keyword was removed from matplotlib;
# ``density=True`` is the equivalent normalised histogram.
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")

# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
    """Tick formatter: label ticks in units of the bandwidth h."""
    special = {0: '0', 1: 'h', -1: '-h'}
    label = special.get(x)
    if label is not None:
        return label
    return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
# Bug fix: integer sample sizes required -- float sizes are an error on
# modern NumPy.
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
datascopeanalytics/scrubadub | scrubadub/comparison.py | 1 | 13150 | import re
import copy
import random
from faker import Faker
from . import filth as filth_module
from .filth import Filth
from .detectors.known import KnownFilthItem
from typing import List, Dict, Union, Optional, Tuple
import pandas as pd
import sklearn.metrics
def get_filth_classification_report(
        filth_list: List[Filth],
        output_dict: bool = False,
) -> Optional[Union[str, Dict[str, float]]]:
    """Evaluates the performance of detectors using KnownFilth.

    Builds a per-filth indicator matrix with one column per
    (filth_type, detector_name, locale) triple, then compares each
    detector's column against its KnownFilth counterpart using
    sklearn's classification_report.

    :param filth_list: The list of detected filth
    :type filth_list: A list of `Filth` objects
    :param output_dict: Return the report in JSON format, defaults to False
    :type output_dict: bool, optional
    :return: The report in JSON (a `dict`) or in plain text; None when
        ``filth_list`` yields no rows of interest
    :rtype: `str` or `dict` or None
    """
    results = []  # type: List[Dict[str, int]]
    filth_max_length = 0
    detector_name_max_length = 0
    locale_max_length = 0

    # One results row per (possibly merged) filth item; keys are
    # "type:detector:locale" strings flagged with 1 when present.
    for filth_item in filth_list:
        sub_filths = [filth_item]
        if isinstance(filth_item, filth_module.base.MergedFilth):
            sub_filths = filth_item.filths
        results_row = {}
        for sub_filth in sub_filths:
            if isinstance(sub_filth, filth_module.KnownFilth) and sub_filth.comparison_type is not None:
                # Known (ground-truth) filth: column keyed by the filth type
                # it should be compared against.
                results_row[
                    '{}:{}:{}'.format(sub_filth.comparison_type, filth_module.KnownFilth.type, sub_filth.locale)] = 1
            else:
                try:
                    results_row['{}:{}:{}'.format(sub_filth.type, sub_filth.detector_name, sub_filth.locale)] = 1
                except AttributeError:
                    print(type(sub_filth), sub_filth)
                    raise
        # Dont include filth that was not produced by one of the detectors of interest
        if sum(results_row.values()) > 0:
            results.append(results_row)

    if len(results) == 0:
        return None

    # Indicator DataFrame with a (filth_type, detector_name, locale)
    # MultiIndex on the columns, rebuilt from the "a:b:c" string keys.
    results_df = pd.DataFrame(results).fillna(0).astype(int)
    results_df.columns = pd.MultiIndex.from_tuples(
        results_df.columns.str.split(':').values.tolist(),
        names=['filth_type', 'detector_name', 'locale'],
    )

    # Find filth types that have some known filth
    known_types = [x[0] for x in results_df.columns if x[1] == filth_module.KnownFilth.type]
    # Select columns for filth that have related known filth, but that are not known filth
    detected_columns = [
        x for x in results_df.columns
        if x[1] != filth_module.KnownFilth.type and x[0] in known_types
    ]
    detected_classes = results_df.loc[:, detected_columns].values

    # Take the detected_columns above and find their associated known counterparts
    known_cols = [(x[0], filth_module.KnownFilth.type, x[2]) for x in detected_columns]
    true_classes = results_df.loc[:, known_cols].values

    if not output_dict:
        # Right-justify the three label parts into aligned columns so the
        # plain-text report lines up under the header added below.
        filth_max_length = max([len(x[0]) for x in detected_columns] + [len("filth")])
        detector_name_max_length = max([len(x[1]) for x in detected_columns] + [len("detector")]) + 4
        locale_max_length = max([len(x[2]) for x in detected_columns] + [len("locale")]) + 4
        class_labels = [
            "{} {} {} ".format(
                x[0].rjust(filth_max_length),
                x[1].rjust(detector_name_max_length),
                x[2].rjust(locale_max_length)
            )
            for x in detected_columns
        ]
    else:
        class_labels = ["{}:{}:{}".format(*x) for x in detected_columns]

    report_labels = []
    # If there is only one label reshape the data so that
    # the classification_report interprets it less ambiguously
    if detected_classes.shape[1] == 1:
        detected_classes = detected_classes.T[0]
        true_classes = true_classes.T[0]
        report_labels = [1]
    else:
        report_labels = [class_labels.index(x) for x in sorted(class_labels)]
        class_labels = sorted(class_labels)

    report = sklearn.metrics.classification_report(
        true_classes,
        detected_classes,
        output_dict=output_dict,
        zero_division=0,
        target_names=class_labels,
        labels=report_labels,
        # **extra_args
    )
    if not output_dict:
        # Prepend an aligned header row to the plain-text report.
        report = (
            'filth'.rjust(filth_max_length) +
            'detector'.rjust(detector_name_max_length + 1) +
            'locale'.rjust(locale_max_length + 1) +
            (' '*4) +
            report.lstrip(' ')
        )
    return report
def get_filth_dataframe(filth_list: List[Filth]) -> pd.DataFrame:
    """Produces a pandas `DataFrame` to allow debugging and improving detectors.

    Each detected Filth (including each sub-filth of a MergedFilth) becomes
    a row; detected rows are then outer-joined to their KnownFilth
    counterparts of the same group and type so that exact/partial matches
    and true/false positives/negatives can be inspected per row.

    :param filth_list: The list of detected filth
    :type filth_list: A list of `Filth` objects
    :return: A `pd.DataFrame` containing information about the detected `Filth`
    :rtype: `pd.DataFrame`
    """
    results = []
    for group_id, filth_item in enumerate(filth_list):
        sub_filths = [filth_item]
        if isinstance(filth_item, filth_module.base.MergedFilth):
            sub_filths = filth_item.filths
        for filth_id, sub_filth in enumerate(sub_filths):
            results.append({
                'group_id': group_id,  # index of the (merged) filth item
                'filth_id': filth_id,  # index of the sub-filth within it
                'filth_type': sub_filth.type,
                'detector_name': getattr(sub_filth, 'detector_name', float('nan')),
                'document_name': getattr(sub_filth, 'document_name', float('nan')),
                'text': sub_filth.text,
                'beg': sub_filth.beg,
                'end': sub_filth.end,
                'locale': sub_filth.locale,
                'known_filth': isinstance(sub_filth, filth_module.KnownFilth),
                'comparison_type': getattr(sub_filth, 'comparison_type', float('nan')),
            })
    results_df = pd.DataFrame(results)
    suffix_label = '_y_suffix'
    # Outer-join detected rows (left) to known rows (right) on group and
    # type; the right-hand columns come back as '<col>_y_suffix' and are
    # renamed to 'known_<col>' before the indicator columns are derived.
    return (
        pd.merge(
            results_df[~results_df['known_filth']],
            results_df[results_df['known_filth']][['group_id', 'text', 'beg', 'end', 'comparison_type']],
            how='outer',
            left_on=('group_id', 'filth_type'),
            right_on=('group_id', 'comparison_type'),
            suffixes=('', suffix_label)
        )
        .rename(columns=lambda x: x if not x.endswith(suffix_label) else 'known_' + x[:-len(suffix_label)])
        .assign(
            known_filth=lambda df: ~pd.isnull(df['known_text']),
            exact_match=lambda df: (df['text'] == df['known_text']).fillna(False),
            partial_match=lambda df: ((df['beg'] < df['known_end']) & (df['end'] > df['known_beg']).fillna(False)),
            true_positive=lambda df: (~pd.isnull(df['known_text'])) & (~pd.isnull(df['text'])),
            false_positive=lambda df: (pd.isnull(df['known_text'])) & (~pd.isnull(df['text'])),
            false_negative=lambda df: (~pd.isnull(df['known_text'])) & (pd.isnull(df['text'])),
        )
    )
def make_fake_document(
        paragraphs: int = 20, locale: str = 'en_US', seed: Optional[int] = None, faker: Optional[Faker] = None,
        filth_types: Optional[List[str]] = None
) -> Tuple[str, List[KnownFilthItem]]:
    """Creates a fake document containing `Filth` that needs to be removed. Also returns the list of known filth
    items that are needed by the `KnownFilthDetector`\\ .

    An example of using this is shown below:

    .. code:: pycon

        >>> import scrubadub, scrubadub.comparison
        >>> document, known_filth_items = scrubadub.comparison.make_fake_document(paragraphs=1, seed=1)
        >>> scrubber = scrubadub.Scrubber()
        >>> scrubber.add_detector(scrubadub.detectors.KnownFilthDetector(known_filth_items=known_filth_items))
        >>> filth_list = list(scrubber.iter_filth(document))
        >>> print(scrubadub.comparison.get_filth_classification_report(filth_list))
        filth    detector     locale      precision    recall  f1-score   support
        <BLANKLINE>
        url      url          en_US            1.00      1.00      1.00         1
        email    email        en_US            1.00      1.00      1.00         2
        <BLANKLINE>
        micro avg                              1.00      1.00      1.00         3
        macro avg                              1.00      1.00      1.00         3
        weighted avg                           1.00      1.00      1.00         3
        samples avg                            1.00      1.00      1.00         3
        <BLANKLINE>

    :param paragraphs: The number of paragraphs to generate in the document
    :type paragraphs: int
    :param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
                   underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
    :type locale: str
    :param seed: The random seed used to generate the document
    :type seed: int, optional
    :param faker: A Faker object that is used to generate the text
    :type faker: Faker, optional
    :param filth_types: A list of the ``Filth.type`` to generate
    :type filth_types: List[str]
    :return: The document and a list of `KnownFilthItem`\\ s
    :rtype: Tuple[str, List[KnownFilthItem]]
    """
    if faker is None:
        faker = Faker(locale=locale)

    # TODO: register filth types to build up a dict that can be read from, like the detectors
    possible_filth = [
        filth_module.AddressFilth,
        filth_module.EmailFilth,
        filth_module.NameFilth,
        filth_module.PhoneFilth,
        filth_module.PostalCodeFilth,
        filth_module.SSNFilth,
        filth_module.TwitterFilth,
        filth_module.UrlFilth,
    ]
    if filth_types is not None:
        possible_filth = [filth for filth in possible_filth if filth.type in filth_types]

    if seed is not None:
        # Seed both Faker's and the stdlib's RNGs so documents are reproducible.
        Faker.seed(seed)
        random.seed(seed)

    doc = ""
    known_items = []  # type: List[KnownFilthItem]
    for i_paragraph in range(paragraphs):
        for i_sentance_group in range(random.randint(1, 10)):
            text = faker.text()
            # Pick a whitespace/period position to splice the PII into the text.
            matches = list(re.finditer(r'[\s.]', text))
            position = random.choice(matches)
            chosen_filth = random.choice(possible_filth)
            pii_text = chosen_filth.generate(faker=faker)
            known_items.append({
                'match': copy.copy(pii_text),
                'filth_type': copy.copy(chosen_filth.type),
            })
            # Insert the PII surrounded by the chosen separator character.
            doc += (
                text[:position.start()] +
                position.group() +
                pii_text +
                position.group() +
                text[position.end():]
            )
        doc += "\n\n"
    return (doc.strip(), known_items)
| mit |
byzin/Nanairo | tool/spectral_transport/spectral_transport.py | 1 | 27081 | # file: spectral_transport.py
# Import system plugins
import math
import os
import sys
import time
# Import custom plugins
def printPluginError(plugin_name, package_name):
    """Report a missing third-party plugin on stderr with a pip install hint."""
    message = "'{}' is required.".format(plugin_name) \
        + " Please install the package using 'pip install {}'.\n".format(package_name)
    sys.stderr.write(message)
kHavePlugins = True
# NumPy
try:
import numpy as np
except:
printPluginError("NumPy", "numpy")
kHavePlugins = False
# SciPy
try:
import scipy
except:
printPluginError("SciPy", "scipy")
kHavePlugins = False
# Colour Science
try:
import colour.plotting as clr
import colour.recovery as rec
import colour
except:
printPluginError("Colour Science", "colour-science")
kHavePlugins = False
# Matplotlib
try:
import matplotlib.pyplot as plt
from matplotlib.path import Path
except:
printPluginError("Matplotlib", "matplotlib")
kHavePlugins = False
# Global variables
kLicenseString = '\
/*!\n\
Copyright (c) 2015-2018 Sho Ikeda\n\
This software is released under the MIT License.\n\
http://opensource.org/licenses/mit-license.php\n\
*/\n'
kShortestWavelength = 380
kLongestWavelength = 780
kWavelengthResolution = 10
assert(((kLongestWavelength - kShortestWavelength) % kWavelengthResolution) == 0)
kGridResolution = (6, 4)
kCmfXBarPath = "xyz_cmf/cie_sco_2degree_xbar.csv"
kCmfYBarPath = "xyz_cmf/cie_sco_2degree_ybar.csv"
kCmfZBarPath = "xyz_cmf/cie_sco_2degree_zbar.csv"
# ------------------------------------------------------------------------------
# Return the num of wavelengths.
# ------------------------------------------------------------------------------
def spectraSize():
    """Return the number of wavelength samples (both endpoints inclusive)."""
    wavelength_span = kLongestWavelength - kShortestWavelength
    return wavelength_span // kWavelengthResolution + 1
# ------------------------------------------------------------------------------
# Color matching functions.
# ------------------------------------------------------------------------------
class Cmf:
    """CIE color matching functions, resampled onto the configured wavelength grid.

    The sampled values are stored in class-level lists and filled in by
    `initCmf()`, which must be called once before any lookup.
    """

    # Sampled x/y/z matching-function values, one entry per wavelength bin.
    x_bar = []
    y_bar = []
    z_bar = []

    @staticmethod
    def getWavelength(index):
        """Return the wavelength (nm) of sample bin `index`."""
        assert(0 <= index < spectraSize())
        return kShortestWavelength + index * kWavelengthResolution

    @classmethod
    def initCmf(cls):
        """Load the CMF CSV tables and resample them onto the wavelength grid."""
        w, x_bar = np.loadtxt(kCmfXBarPath, delimiter=',', skiprows=2, unpack=True)
        _, y_bar = np.loadtxt(kCmfYBarPath, delimiter=',', skiprows=2, unpack=True)
        _, z_bar = np.loadtxt(kCmfZBarPath, delimiter=',', skiprows=2, unpack=True)
        # The tables must cover the full requested wavelength range.
        assert(w[0] <= kShortestWavelength)
        assert(kLongestWavelength <= w[-1])
        for i in range(spectraSize()):
            wavelength = Cmf.getWavelength(i)
            cls.x_bar.append(np.interp(wavelength, w, x_bar))
            cls.y_bar.append(np.interp(wavelength, w, y_bar))
            cls.z_bar.append(np.interp(wavelength, w, z_bar))

    @classmethod
    def getX(cls, index):
        """Return the sampled x-bar value for bin `index`."""
        assert(0 <= index < spectraSize())
        return cls.x_bar[index]

    @classmethod
    def getY(cls, index):
        """Return the sampled y-bar value for bin `index`."""
        assert(0 <= index < spectraSize())
        return cls.y_bar[index]

    @classmethod
    def getZ(cls, index):
        """Return the sampled z-bar value for bin `index`."""
        assert(0 <= index < spectraSize())
        return cls.z_bar[index]

    @staticmethod
    def toXyz(spectra):
        '''As CIE instructs, we integrate using simple summation.'''
        assert(len(spectra) == spectraSize())
        xyz = [0.0, 0.0, 0.0]
        for i in range(spectraSize()):
            s = spectra[i]
            xyz[0] += s * Cmf.getX(i)
            xyz[1] += s * Cmf.getY(i)
            xyz[2] += s * Cmf.getZ(i)
        # Riemann-sum scaling by the wavelength step size.
        d_lambda = float(kWavelengthResolution)
        return [v * d_lambda for v in xyz]

    @staticmethod
    def getEEWhite():
        """Return the XYZ tristimulus of the equal-energy (flat, unit) spectrum."""
        ee_white = [1.0] * spectraSize()
        return Cmf.toXyz(ee_white)
# ------------------------------------------------------------------------------
# Transform between color spaces.
# ------------------------------------------------------------------------------
class Transform:
    """Conversions between the xy, xy* and uv 2-D chromaticity coordinate systems.

    The transformation matrices are stored as class attributes and are set up
    by `initXystar()` (which depends only on the CMFs) and `initUv()` (which
    additionally depends on the grid layout).
    """

    # --------------------------------------------------------------------------
    # Homogenize/dehomogenize vectors.
    # --------------------------------------------------------------------------
    @staticmethod
    def hom(v2):
        """Lift a 2-D point to a homogeneous 3x1 column vector."""
        assert(len(v2) >= 2)
        return np.matrix([[v2[0]], [v2[1]], [1]])

    @staticmethod
    def dehom(v3):
        """Project a homogeneous 3-vector (row or column) back to a 2-D list."""
        assert((v3.shape[0] == 3 and v3.shape[1] == 1)
               or (v3.shape[0] == 1 and v3.shape[1] == 3))
        v = v3.flatten().tolist()[0]
        return [v[0] / v[2], v[1] / v[2]]

    # --------------------------------------------------------------------------
    # Convert from xyy to xyz and back.
    # --------------------------------------------------------------------------
    @staticmethod
    def toXyzFromXyy(xyy):
        """Convert (x, y, Y) chromaticity+luminance to XYZ tristimulus."""
        return (xyy[0] * xyy[2] / xyy[1],
                xyy[2],
                (1 - xyy[0] - xyy[1]) * xyy[2] / xyy[1])

    @staticmethod
    def toXyyFromXyz(xyz):
        """Convert XYZ tristimulus to (x, y, Y)."""
        s = sum(xyz)
        return (xyz[0] / s, xyz[1] / s, xyz[1])

    # --------------------------------------------------------------------------
    # Convert from xy to xy* and back.
    # --------------------------------------------------------------------------
    mat_xystar_to_xy = None
    mat_xy_to_xystar = None

    @classmethod
    def initXystar(cls):
        '''xy* is a color space where the line between blue and red is horizontal.
        Also, equal-energy white is the origin.
        xy* depends only on the color matching functions used.'''
        num_bins = spectraSize()
        # Pure blue.
        s = [0.0] * num_bins
        s[0] = 1.0
        xy0 = cls.toXyyFromXyz(Cmf.toXyz(s))
        # Pure red.
        s = [0.0] * num_bins
        s[-1] = 1.0
        xy1 = cls.toXyyFromXyz(Cmf.toXyz(s))
        # Unit direction along the purple line (blue -> red).
        d = np.array(xy1[:2]) - np.array(xy0[:2])
        d /= math.sqrt(np.vdot(d, d))
        # Translation to make ee-white (in xy) the origin.
        T = np.matrix([[ 1.0, 0.0, -1/3],
                       [ 0.0, 1.0, -1/3],
                       [ 0.0, 0.0, 1.0]])
        # Rotation to make purple line horizontal.
        R = np.matrix([[ d[0], d[1], 0.0],
                       [-d[1], d[0], 0.0],
                       [ 0.0, 0.0, 1.0]])
        cls.mat_xy_to_xystar = np.dot(R, T)
        cls.mat_xystar_to_xy = cls.mat_xy_to_xystar.getI()

    @classmethod
    def toXystarFromXy(cls, xy):
        """Map an xy point into xy*."""
        return cls.dehom(np.dot(cls.mat_xy_to_xystar, cls.hom(xy)))

    @classmethod
    def toXyFromXystar(cls, xystar):
        """Map an xy* point back to xy."""
        return cls.dehom(np.dot(cls.mat_xystar_to_xy, cls.hom(xystar)))

    # --------------------------------------------------------------------------
    # Convert from xy to uv and back.
    # --------------------------------------------------------------------------
    mat_uv_to_xystar = None
    mat_xystar_to_uv = None
    mat_uv_to_xy = None
    mat_xy_to_uv = None

    @classmethod
    def initUv(cls, xystar_bbox, grid_res):
        '''uv is derived from xy* by transforming grid points to integer coordinates.
        uv depends on xy* and the grid used.'''
        # Translate xystar bounding box min to origin.
        T = np.matrix([[1.0, 0.0, -xystar_bbox[0][0]],
                       [0.0, 1.0, -xystar_bbox[0][1]],
                       [0.0, 0.0, 1.0]])
        # Scale so that one grid cell has unit size.
        w = xystar_bbox[1][0] - xystar_bbox[0][0]
        h = xystar_bbox[1][1] - xystar_bbox[0][1]
        S = np.matrix([[grid_res[0] / w, 0.0, 0.0],
                       [0.0, grid_res[1] / h, 0.0],
                       [0.0, 0.0, 1.0]])
        cls.mat_xystar_to_uv = np.dot(S, T)
        cls.mat_uv_to_xystar = cls.mat_xystar_to_uv.getI()
        cls.mat_xy_to_uv = np.dot(cls.mat_xystar_to_uv, cls.mat_xy_to_xystar)
        cls.mat_uv_to_xy = cls.mat_xy_to_uv.getI()

    @classmethod
    def toUvFromXy(cls, xy):
        """Map an xy point into uv grid coordinates."""
        return cls.dehom(np.dot(cls.mat_xy_to_uv, cls.hom(xy)))

    @classmethod
    def toXyFromUv(cls, uv):
        """Map a uv grid coordinate back to xy."""
        return cls.dehom(np.dot(cls.mat_uv_to_xy, cls.hom(uv)))

    @classmethod
    def toUvFromXystar(cls, xystar):
        """Map an xy* point into uv grid coordinates."""
        return cls.dehom(np.dot(cls.mat_xystar_to_uv, cls.hom(xystar)))

    @classmethod
    def toXystarFromUv(cls, uv):
        """Map a uv grid coordinate back to xy*."""
        return cls.dehom(np.dot(cls.mat_uv_to_xystar, cls.hom(uv)))
# ------------------------------------------------------------------------------
# Given a color in XYZ, determine a smooth spectrum that corresponds to that
# color.
# ------------------------------------------------------------------------------
def findSpectrum(xyz):
    """Find a smooth, non-negative spectrum whose tristimulus matches `xyz`.

    :param xyz: target XYZ tristimulus values
    :return: (spectrum, broken, message) — `spectrum` is a list with one
             sample per wavelength bin, `broken` is True when the optimizer
             failed, `message` describes the failure (empty on success).
    """
    from scipy.optimize import minimize

    def objective(S):
        # Smits-style roughness: sum of squared adjacent differences.
        # Note: We found much better convergence with the square term
        # than with its square root.
        return sum((a - b) ** 2 for a, b in zip(S, S[1:]))

    num_bins = spectraSize()
    start = [1] * num_bins
    # Constraint: Match XYZ values.
    constraint = {
        'type': 'eq',
        'fun': lambda s: (np.array(Cmf.toXyz(s)) - xyz)
    }
    # We want positive spectra.
    bounds = [(0, 1000)] * num_bins
    result = minimize(objective, start, method='SLSQP', constraints=constraint,
                      bounds=bounds, options={"maxiter": 2000, "ftol": 1e-10})
    if result.success:
        # The result may contain some very tiny negative values due
        # to numerical issues. Clamp those to 0.
        return ([max(v, 0) for v in result.x], False, "")
    err_message = 'Error for xyz={} after {} iterations: {}'.format(xyz, result.nit, result.message)
    return ([0] * num_bins, True, err_message)
# ------------------------------------------------------------------------------
# Get the boundary of the horseshoe as a path in xy*.
# ------------------------------------------------------------------------------
def makeHorseshoePath():
    """Build the spectral-locus ("horseshoe") outline as a matplotlib Path in xy*.

    :return: (path, bbox) where `path` is a closed matplotlib `Path` of the
             locus and `bbox` is [(min_x, min_y), (max_x, max_y)] of its
             vertices in xy* coordinates.
    """
    verts = []
    codes = []
    d_lambda = float(kWavelengthResolution)
    # One vertex per wavelength bin: the chromaticity of a monochromatic stimulus.
    for i in range(spectraSize()):
        xyz = [0.0, 0.0, 0.0]
        xyz[0] = d_lambda * Cmf.getX(i)
        xyz[1] = d_lambda * Cmf.getY(i)
        xyz[2] = d_lambda * Cmf.getZ(i)
        xyy = Transform.toXyyFromXyz(xyz)
        xystar = Transform.toXystarFromXy(xyy[:2])
        verts.append(xystar)
        codes.append(Path.LINETO)
    # First vertex starts the path; CLOSEPOLY joins red back to blue
    # (the purple line).
    codes[0] = Path.MOVETO
    codes.append(Path.CLOSEPOLY)
    # Bounding box is computed before the dummy CLOSEPOLY vertex is appended.
    vx = [x for (x, y) in verts]
    vy = [y for (x, y) in verts]
    bbox = [ (min(vx), min(vy)), (max(vx), max(vy)) ]
    verts.append((0,0))
    return (Path(verts, codes), bbox)
# ------------------------------------------------------------------------------
# Grid data structures.
# ------------------------------------------------------------------------------
class DataPoint:
    """One sample point of the spectral grid."""

    def __init__(self):
        # Coordinates in the two 2-D spaces used by the pipeline.
        self.xystar = (0, 0)
        self.uv = (0, 0)
        # Scalar payload and the sampled spectrum.
        self.Y = 0
        self.M = 0
        self.spectrum = [0]
        # Status flags: inside the gamut, is the equal-energy white point,
        # and whether spectrum computation failed.
        self.inside = False
        self.equal_energy_white = False
        self.broken = False

    def updateUv(self):
        """Recompute `uv` from the current `xystar` position."""
        self.uv = Transform.toUvFromXystar(self.xystar)
class GridCell:
    """One cell of the regular xy* grid.

    `indices` lists data-point indices belonging to the cell, `triangles`
    holds triangle fans for irregular (clipped) cells, and `inside` is True
    while every corner lies inside the gamut.
    """

    def __init__(self):
        self.indices, self.triangles = [], []
        self.inside = True
# binary search to find intersection
def findIntersection(p0, p1, i0, i1, clip_path):
    """Bisect the segment p0-p1 to find where it crosses the gamut boundary.

    :param p0, p1: segment endpoints as numpy arrays
    :param i0, i1: whether p0 / p1 lie inside the clip path
    :param clip_path: object with `contains_point` defining the gamut
    :return: a point close to the boundary, nudged slightly to the inside.
    """
    while True:
        delta = p1 - p0
        if np.linalg.norm(delta) < 0.0001:
            # Endpoints nearly coincide: stop, moving the result slightly
            # towards the inside endpoint so it stays within the gamut.
            delta *= 0.998
            return (p1 - delta) if i0 else (p0 + delta)
        midpoint = 0.5 * (p0 + p1)
        mid_inside = clip_path.contains_point(midpoint)
        if i0 != mid_inside:
            # Crossing is in the first half.
            p1, i1 = midpoint, mid_inside
        elif i1 != mid_inside:
            # Crossing is in the second half.
            p0, i0 = midpoint, mid_inside
        else:
            print ("something wrong here")
            return midpoint
def clipEdge(d0, d1, clip_path):
    """Clip a grid edge between two data points against the gamut boundary.

    :param d0: first endpoint (object with `.xystar` and `.inside`)
    :param d1: second endpoint
    :param clip_path: object with `contains_point` defining the gamut outline
    :return: (False, None) when the edge does not cross the boundary,
             otherwise (True, point) where `point` is a new DataPoint placed
             on the boundary and marked as inside.
    """
    # Only edges with exactly one endpoint inside cross the boundary.
    # (Replaces the previous per-call `from operator import xor`; for two
    # booleans, xor(a, b) is simply a != b.)
    if d0.inside == d1.inside:
        return (False, None)
    p0 = np.array(d0.xystar)
    p1 = np.array(d1.xystar)
    p = findIntersection(p0, p1, d0.inside, d1.inside, clip_path)
    data_point = DataPoint()
    data_point.xystar = p
    data_point.inside = True
    return (True, data_point)
def generateXystarGrid(scale):
    """Build the regular xy* grid, clipped against the spectral-locus gamut.

    :param scale: factor by which all points are pulled towards the
                  equal-energy white point to avoid singular spectra.
    :return: (data_points, grid, grid_res, bbox) — the compacted point list,
             the list of GridCells (row-major), the grid resolution, and the
             scaled xy* bounding box.
    """
    print("Generating clip path ...")
    clip_path, bbox = makeHorseshoePath()
    # We know that xy(1/3, 1/3) = xystar(0, 0) must be a grid point.
    # subdivide the rectangle between that and the purest red regularly with res.
    # Note: This can be freely chosen, but we found 6,4 to be a reasonable value.
    res = kGridResolution
    white_xystar = [0.0, 0.0]
    step_x = abs(white_xystar[0] - bbox[1][0]) / res[0]
    step_y = abs(white_xystar[1] - bbox[0][1]) / res[1]
    # Find bbox top left corner so that the whole diagram is contained.
    add_x = int(math.ceil(abs(white_xystar[0] - bbox[0][0]) / step_x))
    add_y = int(math.ceil(abs(bbox[1][1] - white_xystar[1]) / step_y))
    # The index of white - we will set this spectrum to equal energy white.
    white_idx = (add_x, res[1])
    grid_res = (res[0] + add_x, res[1] + add_y)
    bbox = [
        # min
        (white_xystar[0]- step_x * add_x, bbox[0][1]),
        # max
        (bbox[1][0], white_xystar[1] + step_y * add_y)
    ]
    grid = [GridCell() for i in range(grid_res[0] * grid_res[1])]
    data_points = []
    # Generate grid points.
    print(" Generating grid points in xy* ...")
    # Every lattice point is appended (row-major); outside points are removed
    # later in the compaction pass below.
    for (x,y) in [(x,y) for y in range(grid_res[1]+1) for x in range(grid_res[0]+1)]:
        data_point = DataPoint()
        data_point.xystar = (bbox[0][0] + step_x * x, bbox[0][1] + step_y * y)
        if (x, y) == white_idx:
            # Numerically, we want the white point to be at xy = (1/3, 1/3).
            delta = np.array(data_point.xystar) - np.array(white_xystar)
            assert(np.dot(delta, delta) < 1e-7)
            data_point.equal_energy_white = True
        # Clip on horseshoe.
        if clip_path.contains_point(data_point.xystar) \
                or (x > 0 and y == 0): # Special case for purple line.
            data_point.inside = True
        new_idx = len(data_points)
        data_points.append(data_point)
        # Add new index to this all four adjacent cells.
        for (cx, cy) in [(x-dx, y-dy) for dy in range(2) for dx in range(2)]:
            if cx >= 0 and cx < grid_res[0] and cy >= 0 and cy < grid_res[1]:
                cell = grid[cy * grid_res[0] + cx]
                cell.indices.append(new_idx)
                # A cell is inside only if all of its corners are inside.
                cell.inside = cell.inside and data_point.inside
    # Clip grid cells against horseshoe.
    print(" Clipping cells to xy gamut ...")
    for (x, y) in [(x, y) for x in range(grid_res[0]) for y in range(grid_res[1])]:
        cell = grid[y * grid_res[0] + x]
        # No need to clip cells that are completely inside.
        if cell.inside:
            continue
        # We clip the two outgoing edges of each point:
        #
        #  d2
        #  .
        #  d0 . d1
        # Note: We assume here that data_points was generated as a regular
        # grid in row major order.
        d0 = data_points[(y+0)*(grid_res[0]+1)+(x+0)]
        d1 = data_points[(y+0)*(grid_res[0]+1)+(x+1)]
        d2 = data_points[(y+1)*(grid_res[0]+1)+(x+0)]
        (clipped_h, p_h) = clipEdge(d0, d1, clip_path)
        if clipped_h:
            new_idx = len(data_points)
            data_points.append(p_h)
            cell.indices.append(new_idx)
            # The horizontal edge is shared with the cell below.
            if y > 0:
                grid[(y-1) * grid_res[0] + x].indices.append(new_idx)
        (clipped_v, p_v) = clipEdge(d0, d2, clip_path)
        if clipped_v:
            new_idx = len(data_points)
            data_points.append(p_v)
            cell.indices.append(new_idx)
            # The vertical edge is shared with the cell to the left.
            if x > 0:
                grid[y * grid_res[0] + x - 1].indices.append(new_idx)
    # Compact grid points (throw away points that are not inside).
    print(" Compacting grid ...")
    new_data_points = []
    new_indices = []
    prefix = 0
    for data_point in data_points:
        if data_point.inside:
            new_indices.append(prefix)
            new_data_points.append(data_point)
            prefix += 1
        else:
            new_indices.append(-1)
    data_points = new_data_points
    # Remap each cell's indices to the compacted numbering.
    for gridcell in grid:
        new_cell_indices = []
        for index in range(len(gridcell.indices)):
            old_index = gridcell.indices[index]
            if new_indices[old_index] >= 0:
                new_cell_indices.append(new_indices[old_index])
        gridcell.indices = new_cell_indices[:]
    # Scale points down towards white point to avoid singular spectra.
    for data_point in data_points:
        data_point.xystar = [v * scale for v in data_point.xystar]
    bbox[0] = [v * scale for v in bbox[0]]
    bbox[1] = [v * scale for v in bbox[1]]
    return data_points, grid, grid_res, bbox
# ------------------------------------------------------------------------------
# Compute spectra for all data points.
# ------------------------------------------------------------------------------
def computeDataSpectra(data_point):
    """Compute the spectrum for a single grid data point.

    :param data_point: DataPoint whose `uv` position and
                       `equal_energy_white` flag are read.
    :return: (spectrum, broken) — the list of spectral samples and a flag
             that is True when the optimizer failed to find a spectrum.
    """
    xy = Transform.toXyFromUv(data_point.uv)
    # Set luminance to y. This means that X+Y+Z = 1,
    # since y = Y / (X+Y+Z) = y / (X+Y+Z).
    xyY = [xy[0], xy[1], xy[1]]
    xyz = Transform.toXyzFromXyy(xyY)
    spectrum = []
    broken = False
    if data_point.equal_energy_white:
        # Since we want X=Y=Z=1/3 (so that X+Y+Z=1), the equal-energy white
        # spectrum we want is 1/(3 int(x)) for x color matching function.
        spectrum = [1 / (3 * Cmf.getEEWhite()[0])] * spectraSize()
    else:
        spectrum, broken, message = findSpectrum(xyz)
        if broken:
            print("Couldn't find a spectrum for uv=({uv[0]},{uv[1]})".format(uv=data_point.uv))
            print(message)
    # Sanity check: the recovered spectrum should integrate to X+Y+Z ~= 1.
    # (Local renamed from `sum`, which shadowed the builtin.)
    xyz = Cmf.toXyz(spectrum)
    brightness = xyz[0] + xyz[1] + xyz[2]
    if brightness < 0.99 or 1.01 < brightness:
        print('Invalid brightness {} for uv=({uv[0]},{uv[1]})'.format(brightness, uv=data_point.uv))
    return (spectrum, broken)
# ------------------------------------------------------------------------------
def computeSpectra(data_points):
    """Compute and store a spectrum for every grid data point (in place)."""
    print('Computing spectra ...')
    # TODO: Parallelize for multiplatform
    data_size = len(data_points)

    def printStatus(done):
        # \033[2K\r clears the current terminal line before re-printing.
        print('\033[2K\r{} done, {} total: '.format(done, data_size), end='')

    printStatus(0)
    for count, data in enumerate(data_points, start=1):
        data.spectrum, data.broken = computeDataSpectra(data)
        # Print status
        printStatus(count)
# ------------------------------------------------------------------------------
# Write spectral data
# ------------------------------------------------------------------------------
def writeSpectralTransportParameters(data_points, grid, grid_res, filename):
    #def writeSpectralTransportParameters(filename):
    """Emit the grid and spectra as a C++ header (`filename`) for Nanairo.

    Note: the emitted text contains C++ braces; in the `.format()` calls
    below, doubled braces (`{{`, `}}`) are literal braces while single
    braces are format placeholders.
    """
    with open(filename, 'w') as f:
        # Header
        include_guard_name = \
            'NANAIRO_SPECTRAL_TRANSPORT_PARAMETERS_{0}_{1}_{2}_HPP'.format(
                kShortestWavelength,
                kLongestWavelength,
                kWavelengthResolution)
        f.write(kLicenseString)
        f.write('\n')
        f.write('#ifndef {0}\n'.format(include_guard_name))
        f.write('#define {0}\n'.format(include_guard_name))
        f.write('\n')
        f.write('// Standard C++ library\n')
        f.write('#include <array>\n')
        f.write('// Zisc\n')
        f.write('#include "zisc/arith_array.hpp"\n')
        f.write('#include "zisc/matrix.hpp"\n')
        f.write('// Nanairo\n')
        f.write('#include "NanairoCore/nanairo_core_config.hpp"\n')
        f.write('#include "NanairoCore/Color/color.hpp"\n')
        f.write('\n')
        f.write('namespace nanairo {\n')
        f.write('\n')
        f.write('namespace spectral_transport {\n')
        f.write('\n')
        # Body
        f.write('static constexpr uint kGridResolution[] = {{ {}, {} }};\n'.format(grid_res[0], grid_res[1]))
        f.write('\n')
        # Only the upper 2x3 part of each 3x3 homogeneous matrix is emitted.
        f.write('static constexpr zisc::Matrix<Float, 2, 3> kToXystarFromXy{\n')
        f.write(' {m[0]}, {m[1]}, {m[2]},\n {m[3]}, {m[4]}, {m[5]} }};\n'.format(m=Transform.mat_xy_to_xystar[:2,:].flatten().tolist()[0]))
        f.write('static constexpr zisc::Matrix<Float, 2, 3> kToUvFromXy{\n')
        f.write(' {m[0]}, {m[1]}, {m[2]},\n {m[3]}, {m[4]}, {m[5]} }};\n'.format(m=Transform.mat_xy_to_uv[:2,:].flatten().tolist()[0]))
        f.write('\n')
        f.write('static constexpr Float kInvEqualEnergyReflectance = {};\n'.format(max(Cmf.getEEWhite())))
        f.write('\n')
        # Grid cell
        # All cells are padded to the largest index count so they fit one type.
        max_num_idx = 0
        for cell in grid:
            if max_num_idx < len(cell.indices):
                max_num_idx = len(cell.indices)
        f.write('// Grid cells. Laid out in row-major format.\n')
        f.write('struct GridCell {\n')
        f.write(' std::array<int, {}> indices_;\n'.format(max_num_idx))
        f.write(' uint num_points_;\n')
        f.write(' bool inside_;\n')
        f.write('};\n')
        f.write('\n')
        f.write('static constexpr GridCell kGrid[] = {\n')
        cell_strings = []
        for (x, y) in [(x,y) for y in range(grid_res[1]) for x in range(grid_res[0])]:
            cell = grid[y * grid_res[0] + x]
            # pad cell indices with -1.
            padded_indices = cell.indices[:] + [-1] * (max_num_idx - len(cell.indices))
            num_inside = len(cell.indices)
            if 0 < num_inside:
                idx_str = ', '.join(map(str, padded_indices))
                if cell.inside and num_inside == 4:
                    cell_strings.append(' {{ {{{{ {} }}}}, {}, true }}'.format(idx_str, num_inside))
                else:
                    cell_strings.append(' {{ {{{{ {} }}}}, {}, false }}'.format(idx_str, num_inside))
            else:
                cell_strings.append(' {{ {{{{ {} }}}}, 0, false }}'.format(', '.join(['-1'] * max_num_idx)))
        f.write(',\n'.join(cell_strings))
        f.write('\n};\n')
        f.write('\n')
        # Data point
        f.write('// Grid data points.\n')
        f.write('struct DataPoint {\n')
        f.write(' zisc::ArithArray<Float, {}> spectra_;\n'.format(spectraSize()))
        f.write(' Color<2> xystar_;\n')
        f.write(' Color<2> uv_;\n')
        f.write('};\n')
        f.write('\n')
        data_point_strings = []
        for p in data_points:
            spec_str = ', '.join(map(str, list(p.spectrum)))
            data_point_strings.append(' {{ {{ {spec} }}, {{ {p.xystar[0]}, {p.xystar[1]} }}, {{ {p.uv[0]}, {p.uv[1]} }} }}'.format(p=p, spec=spec_str))
        f.write('static constexpr DataPoint kDataPoints[] = {\n')
        f.write(',\n'.join(data_point_strings))
        f.write('\n};\n')
        f.write('\n')
        # Footer
        f.write('} // namespace spectral_transport\n')
        f.write('\n')
        f.write('} // namespace nanairo\n')
        f.write('\n')
        f.write('#endif // {0}\n'.format(include_guard_name))
# ------------------------------------------------------------------------------
# We need to triangulate along the spectral locus, since our regular grid
# cannot properly capture this edge.
# ------------------------------------------------------------------------------
def createTriangleFans(grid):
    """Triangulate irregular (boundary) grid cells as fans around their centroid.

    NOTE(review): this function reads AND appends to the module-global
    `data_points` (bound in the `__main__` block) rather than taking it as a
    parameter — confirm before reusing this function outside the script.
    """
    print("generating triangle fans...")
    for cell in grid:
        num_points = len(cell.indices)
        # skip trivial inner cells (full quad interpolation)\n",
        if len(cell.indices) == 4 and cell.inside:
            # these could be sorted here, too. but turns out we always get them in scanline order
            # so we will know exactly how to treat them in the exported c code.
            continue
        # triangulate hard cases (irregular quads + n-gons, 5-gons in practice)
        if num_points > 0:
            # used for delaunay or plotting:\n",
            points = np.array([data_points[cell.indices[i]].xystar for i in range(num_points)])
            # The fan pivot is the cell centroid, added as a new data point.
            centroid = (sum(points[:,0])/num_points, sum(points[:,1])/num_points)
            dp = DataPoint()
            dp.xystar = centroid
            dp.updateUv()
            index = len(data_points)
            data_points.append(dp)
            # create triangle fan:
            # sort the corner points by angle around the centroid
            pts = [(points[i], i, cell.indices[i], math.atan2((points[i]-centroid)[1], (points[i]-centroid)[0])) for i in range(num_points)]
            pts = sorted(pts, key=lambda pt: pt[3])
            # print('sorted {}'.format([pts[i][2] for i in range(num_points)]))
            cell.indices = [index] + [pts[i][2] for i in range(num_points)]
            # print('indices: {}'.format(cell.indices))
            num_points = num_points + 1;
            # do that again with the new sort order:
            # points = np.array([data_points[cell.indices[i]].xystar for i in range(num_points)])
            # now try triangle fan again with right pivot
            cell.triangles = [[0, i+1, i+2] for i in range(len(cell.indices)-2)]
if __name__ == "__main__":
    # Parse command line options.
    import argparse
    parser = argparse.ArgumentParser(description='Generate spectrum_grid.h')
    parser.add_argument('-s', '--scale', metavar='SCALE', type=float,
                        default=0.97,
                        dest='scale',
                        help='Scale grid points toward the EE white point using this factor. Defaults to 0.97.')
    parser.add_argument('-sw', '--shortestW', metavar='SHORTESTW', type=int,
                        default=kShortestWavelength,
                        dest='shortest_w',
                        help='Shortest wavelength.')
    parser.add_argument('-lw', '--longestW', metavar='LONGESTW', type=int,
                        default=kLongestWavelength,
                        dest='longest_w',
                        help='Longest wavelength.')
    parser.add_argument('-r', '--resolution', metavar='RESOLUTION', type=int,
                        default=kWavelengthResolution,
                        dest='w_resolution',
                        help='Wavelength resolution.')
    args = parser.parse_args()
    if not kHavePlugins:
        sys.exit(1)
    # Rebind the module-level wavelength "constants" from the command line.
    # Functions defined above read these globals at call time, so every later
    # call sees the overridden values.
    kShortestWavelength = args.shortest_w
    kLongestWavelength = args.longest_w
    kWavelengthResolution = args.w_resolution
    # Init xystar.
    Cmf.initCmf()
    Transform.initXystar()
    # Generate the grid.
    data_points, grid, grid_res, xystar_bbox = generateXystarGrid(args.scale)
    # Init uv.
    Transform.initUv(xystar_bbox, grid_res)
    for dp in data_points:
        dp.updateUv()
    # Note: createTriangleFans reads/extends the module-global data_points.
    createTriangleFans(grid)
    # Compute spectra and store in spectrum_data.h
    output_file_name = 'spectral_transport_parameters_{0}_{1}_{2}.hpp'.format(
        kShortestWavelength,
        kLongestWavelength,
        kWavelengthResolution)
    computeSpectra(data_points)
    writeSpectralTransportParameters(data_points, grid, grid_res, output_file_name)
| mit |
zihua/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly marks x_3 as irrelevant.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression

# Fixed seed so the example is reproducible.
np.random.seed(0)
X = np.random.rand(1000, 3)
# Target: linear in x_1, sinusoidal in x_2, x_3 is pure noise.
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)

# Normalize both scores to [0, 1] so they can be compared side by side.
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)

mi = mutual_info_regression(X, y)
mi /= np.max(mi)

# One scatter panel per feature, titled with its normalized scores.
plt.figure(figsize=(15, 5))
for i in range(3):
    plt.subplot(1, 3, i + 1)
    plt.scatter(X[:, i], y)
    plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
    if i == 0:
        plt.ylabel("$y$", fontsize=14)
    plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
              fontsize=16)
plt.show()
| bsd-3-clause |
rrohan/scikit-learn | examples/classification/plot_lda.py | 70 | 2413 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
    """Generate random blob-ish data with noisy features.

    This returns an array of input data with shape `(n_samples, n_features)`
    and an array of `n_samples` target labels.

    Only one feature contains discriminative information, the other features
    contain only noise.
    """
    X, y = generate_data._blobs(n_samples, n_features) if False else \
        make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
    # Pad with pure-noise columns so only the first feature is informative.
    n_noise = n_features - 1
    if n_noise > 0:
        noise = np.random.randn(n_samples, n_noise)
        X = np.hstack([X, noise])
    return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
# For each feature count, average train/test accuracy over n_averages runs
# for LDA with and without automatic shrinkage.
for n_features in n_features_range:
    score_clf1, score_clf2 = 0, 0
    for _ in range(n_averages):
        X, y = generate_data(n_train, n_features)

        clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
        clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)

        # Score on a fresh, larger sample from the same distribution.
        X, y = generate_data(n_test, n_features)
        score_clf1 += clf1.score(X, y)
        score_clf2 += clf2.score(X, y)

    acc_clf1.append(score_clf1 / n_averages)
    acc_clf2.append(score_clf2 / n_averages)

features_samples_ratio = np.array(n_features_range) / n_train

plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
         label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
         label="Linear Discriminant Analysis", color='g')

plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')

plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
MTgeophysics/mtpy | mtpy/imaging/plot_mt_response.py | 1 | 63921 | # -*- coding: utf-8 -*-
"""
=================
plot_mt_response
=================
Plots the resistivity and phase for different modes and components
Created 2017
@author: jpeacock
"""
# ==============================================================================
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib.colorbar as mcb
import matplotlib.gridspec as gridspec
import mtpy.imaging.mtcolors as mtcl
# ==============================================================================
# Plot apparent resistivity and phase
# ==============================================================================
from mtpy import MtPyLog
from mtpy.imaging.mtplottools import PlotSettings
class PlotMTResponse(PlotSettings):
"""
Plots Resistivity and phase for the different modes of the MT response. At
the moment it supports the input of an .edi file. Other formats that will
be supported are the impedance tensor and errors with an array of periods
and .j format.
The normal use is to input an .edi file, however it would seem that not
everyone uses this format, so you can input the data and put it into
arrays or objects like class mtpy.core.z.Z. Or if the data is in
resistivity and phase format they can be input as arrays or a class
mtpy.imaging.mtplot.ResPhase. Or you can put it into a class
mtpy.imaging.mtplot.MTplot.
The plot places the apparent resistivity in log scale in the top panel(s),
depending on the plot_num. The phase is below this, note that 180 degrees
has been added to the yx phase so the xy and yx phases plot in the same
quadrant. Both the resistivity and phase share the same x-axis which is in
log period, short periods on the left to long periods on the right. So
if you zoom in on the plot both plots will zoom in to the same
x-coordinates. If there is tipper information, you can plot the tipper
as a third panel at the bottom, and also shares the x-axis. The arrows are
in the convention of pointing towards a conductor. The xx and yy
components can be plotted as well, this adds two panels on the right.
Here the phase is left unwrapped. Other parameters can be added as
subplots such as strike, skew and phase tensor ellipses.
To manipulate the plot you can change any of the attributes listed below
and call redraw_plot(). If you know more about matplotlib and want to
change axes parameters, that can be done by changing the parameters in the
axes attributes and then call update_plot(), note the plot must be open.
Arguments:
----------
**fn**: string
filename containing impedance (.edi) is the only
format supported at the moment
**z_array**: np.ndarray((nf, 2, 2), dtype='complex')
impedance tensor with length of nf -> the number of freq
*default* is None
**z_err_array**: np.ndarray((nf, 2, 2), dtype='real')
impedance tensor error estimates, same shape as z.
*default* is None
**res_array**: np.ndarray((nf, 2, 2))
array of resistivity values in linear scale.
*default* is None
**res_err_array**: np.ndarray((nf, 2, 2))
array of resistivity error estimates, same shape
as res_array. *default* is None
**phase_array**: np.ndarray((nf, 2, 2))
array of phase values in degrees, same shape as
res_array. *default* is None
**phase_err_array**: np.ndarray((nf, 2, 2))
array of phase error estimates, same shape as
phase_array. *default* is None
**tipper_array**: np.ndarray((nf, 1, 2), dtype='complex')
array of tipper values for tx, ty. *default* is None
**tipper_err_array**: np.ndarray((nf, 1, 2))
array of tipper error estimates, same shape as
tipper_array. *default* is None
**z_object**: class mtpy.core.z.Z
object of mtpy.core.z. If this is input be sure the
attribute z.freq is filled. *default* is None
**tipper_object**: class mtpy.core.z.Tipper
object of mtpy.core.z. If this is input be sure the
attribute z.freq is filled.
*default* is None
**mt_object** : class mtpy.imaging.mtplottools.MTplot
object of mtpy.imaging.mtplottools.MTplot
*default* is None
Optional Key Words:
--------------------
*fig_num*: int
figure number
*default* is 1
*fig_size*: [width, height] in inches of actual figure size
*ffactor*: float
scaling factor for computing resistivity from
impedances.
*Default* is 1
*rotation_angle*: float
rotation angle of impedance tensor (deg or radians),
*Note* : rotaion is clockwise positive
*default* is 0
*plot_num*: [ 1 | 2 | 3 ]
* 1 for just Ex/By and Ey/Bx *default*
* 2 for all 4 components
* 3 for off diagonal plus the determinant
*plot_title*: string
plot_title of plot
*default* is station name
*plot_tipper*: [ 'yri' | 'yr' | 'yi' | 'n' ]
Plots the tipper in a bottom panel
* 'yri' --> plots the real and imaginary parts
* 'yr' --> plots just the real part
* 'yi' --> plots just the imaginary part
*Note:* the convention is to point towards a
conductor. Can change this by setting the
parameter arrow_direction = 1.
*plot_strike*: [ 'y' | 1 | 2 | 3 | 'n' ]
Plots the strike angle from different parameters:
* 'y' --> plots strike angle determined from
the invariants of Weaver et al. [2000]
and the phase tensor of
Caldwell et al. [2004], if Tipper is
plotted the strike of the tipper is
also plotted.
* 1 --> plots strike angle determined from
the invariants of Weaver et al. [2000]
* 2 --> plots strike angle determined from
the phase tensor of
Caldwell et al. [2004]
* 3 --> plots strike angle determined from
the tipper
* 'n' --> doesn't plot the strike, *default*
*plot_skew*: [ 'y' | 'n' ]
plots the skew angle calculated from the phase tensor
* 'y' --> plots skew angle
* 'n' --> does not plot skew angle *default*
*plot_pt*: [ 'y' | 'n' ]
plots the phase tensor ellipses which have the properties
of ellipse_
* 'y' --> plots phase tensor ellipses
* 'n' --> does not plot ellipses *default*
*fig_dpi*: int
dots-per-inch resolution, *default* is 300
:Example: ::
>>> import mtpy.core.mt.MT
>>> import mtpy.imaging.plot_mt_response.PlotMTResponse
>>> edifile = r"/home/MT01/MT01.edi"
>>> mt_obj = MT(edifile)
>>> rp1 = PlotMTResponse(mt_obj.Z, plot_num=2)
>>> # plots all 4 components
>>> rp1 = PlotMTResponse(mt_obj.Z, t_object=mt_obj.Tipper, plot_tipper='yr')
>>> # plots the real part of the tipper
Attributes:
-----------
-fn filename to be plotted (only supports .edi so far)
-fig_num figure number for plotting
-fig_size size of figure in inches [width, height]
-plot_num plot type, see arguments for details
-plot_title title of the plot, *default* is station name
-fig_dpi Dots-per-inch resolution of plot, *default* is 300
-rotation_angle Rotate impedance tensor by this angle (deg) assuming
that North is 0 and angle is positive clockwise
-plot_tipper string to tell the program to plot tipper arrows or
not, see accepted values above in arguments
-plot_strike string or integer telling the program to plot the
strike angle, see values above in arguments (YG: not implemented)
-plot_skew string to tell the program to plot skew angle.
The skew is plotted in the same subplot as the strike
angle at the moment (YG: not implemented)
-period period array corresponding to the impedance tensor
-font_size size of font for the axis ticklabels, note that the
axis labels will be font_size+2
-axr matplotlib.axes object for the xy,yx resistivity plot.
-axp matplotlib.axes object for the xy,yx phase plot
-axt matplotlib.axes object for the tipper plot
-ax2r matplotlib.axes object for the xx,yy resistivity plot
-ax2p matplotlib.axes object for the xx,yy phase plot
-axs matplotlib.axes object for the strike plot
-axs2 matplotlib.axes object for the skew plot
..
**Note:** that from these axes object you have control of the
plot. You can do this by changing any parameter in the
axes object and then calling update_plot()
-erxyr class matplotlib.container.ErrorbarContainer for
xy apparent resistivity.
-erxyp class matplotlib.container.ErrorbarContainer for
xy.
-eryxr class matplotlib.container.ErrorbarContainer for
yx apparent resistivity.
-eryxp class matplotlib.container.ErrorbarContainer for
yx phase.
..
**Note:** that from these line objects you can manipulate the
error bar properties and then call update_plot()
-xy_ls line style for xy and xx components, *default* is None
-yx_ls line style for yx and yy components, *default* is None
-det_ls line style for determinant, *default* is None
-xy_marker marker for xy and xx, *default* is squares
-yx_marker marker for yx and yy, *default* is circles
-det_marker marker for determinant, *default* is diamonds
-xy_color marker color for xy and xx, *default* is blue
-yx_color marker color for yx and yy, *default* is red
-det_color marker color for determinant, *default* is green
-xy_mfc marker face color for xy and xx, *default* is None
-yx_mfc marker face color for yx and yy, *default* is None
-det_mfc marker face color for determinant, *default* is None
-skew_marker marker for skew angle, *default* is 'd'
-skew_color color for skew angle, *default* is 'orange'
-strike_inv_marker marker for strike angle determined by invariants
*default* is '^'
-strike_inv_color color for strike angle determined by invaraiants
*default* is (.2, .2, .7)
-strike_pt_marker marker for strike angle determined by pt,
*default* is'v'
-strike_pt_color color for strike angle determined by pt
*default* is (.7, .2, .2)
-strike_tip_marker marker for strike angle determined by tipper
*default* is '>'
-strike_tip_color color for strike angle determined by tipper
*default* is (.2, .7, .2)
-marker_size size of marker in relative dimenstions, *default* is 2
-marker_lw line width of marker, *default* is 100./fig_dpi
-lw line width of line and errorbar lines
..
*For more on line and marker styles see matplotlib.lines.Line2D*
-arrow_lw line width of the arrow, *default* is 0.75
-arrow_head_width head width of the arrow, *default* is 0 for no arrow
head. Haven't found a good way to scale the arrow
heads in a log scale.
-arrow_head_length head width of the arrow, *default* is 0 for no arrow
head. Haven't found a good way to scale the arrow
heads in a log scale.
-arrow_color_real color of the real arrows, *default* is black
-arrow_color_imag color of the imaginary arrows, *default* is blue
-arrow_direction 0 for pointing towards a conductor and -1 for
pointing away from a conductor.
-x_limits limits on the x-limits (period), *default* is None
which will estimate the min and max from the data,
setting the min as the floor(min(period)) and the max
as ceil(max(period)). Input in linear scale if you
want to change the period limits, ie. (.1,1000)
-res_limits limits on the resistivity, *default* is None, which
will estimate the min and max from the data, rounding
to the lowest and highest increments to the power of 10
Input in linear scale if you want to change them,
ie. (1,10000). Note this only sets the xy and yx
components, not the xx and yy.
-phase_limits limits on the phase, *default* is (0,90) but will
adapt to the data if there is phase above 90 or below
0. Input in degrees. Note this only changes the xy
and yx components.
-phase_quadrant [ 1 | 3 ]
* 1 for both phases to be in 0 to 90,
* 3 for xy to be in 0-90 and yx to be in -180 to 270
-tipper_limits limits of the y-axis, *default* is (-1,1)
-skew_limits limits for skew angle, *default* is (-9,9)
-strike_limits limits for strike angle, *default* is (-90,90)
Methods:
--------
* *plot*: plots the pseudosection according to keywords
* *redraw_plot*: redraws the plot, use if you change some of the
attributes.
* *update_plot*: updates the plot, use if you change some of the
axes attributes, figure needs to be open to update.
* *save_plot*: saves the plot to given filepath.
"""
def __init__(self, z_object=None, t_object=None, pt_obj=None,
station='MT Response', **kwargs):
super(PlotMTResponse, self).__init__()
self._logger = MtPyLog.get_mtpy_logger(self.__class__.__module__ + "." + self.__class__.__name__)
self.Z = z_object
self.Tipper = t_object
self.pt = pt_obj
self.station = station
self.phase_quadrant = 1
self.plot_num = kwargs.pop('plot_num', 1)
self.rotation_angle = kwargs.pop('rotation_angle', 0)
if self.Tipper is not None:
self.plot_tipper = 'yri'
else:
self.plot_tipper = 'n'
if self.pt is not None:
self.plot_pt = 'y'
else:
self.plot_pt = 'n'
# set arrow properties
self.arrow_size = 1
self.arrow_head_length = 0.03
self.arrow_head_width = 0.03
self.arrow_lw = .5
self.arrow_threshold = 2
self.arrow_color_imag = 'b'
self.arrow_color_real = 'k'
self.arrow_direction = 0
# ellipse_properties
self.ellipse_size = 0.25
self.ellipse_range = (0, 90, 10)
self.ellipse_colorby = 'phimin'
self.ellipse_cmap = 'mt_bl2gr2rd'
self.ellipse_spacing = kwargs.pop('ellipse_spacing', 1)
if self.ellipse_size == 2 and self.ellipse_spacing == 1:
self.ellipse_size = 0.25
# figure properties:
self.fig_num = 1
self.fig_dpi = 150
self.fig_size = None
self.font_size = 7
self.marker_size = 3
self.marker_lw = .5
self.lw = .5
self.plot_title = None
# line styles:
self.xy_ls = ':'
self.yx_ls = ':'
self.det_ls = ':'
# marker styles:
self.xy_marker = 's'
self.yx_marker = 'o'
self.det_marker = 'v'
# marker color styles:
self.xy_color = (0, 0, .75)
self.yx_color = (.75, 0, 0)
self.det_color = (0, .75, 0)
# marker face color styles:
self.xy_mfc = (0, 0, .75)
self.yx_mfc = (.75, 0, 0)
self.det_mfc = (0, .75, 0)
# plot limits
self.x_limits = None
self.res_limits = None
self.phase_limits = None
self.tipper_limits = None
self.pt_limits = None
# layout params
self.show_resphase_xticklabels = False
self.plot_yn = 'y'
for key in list(kwargs.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
else:
self._logger.warn("Argument {}={} is not supported thus not been set.".format(key, kwargs[key]))
# plot on initializing
if self.plot_yn == 'y':
self.plot()
@property
def period(self):
"""
plot period
"""
if self.Z is not None:
return 1. / self.Z.freq
elif self.Tipper is not None:
return 1. / self.Tipper.freq
else:
return None
    def plot(self, show=True, overlay_mt_obj=None):
        """
        plotResPhase(filename,fig_num) will plot the apparent resistivity and
        phase for a single station.

        Draws apparent resistivity and phase panels, plus optional tipper and
        phase-tensor panels, laid out according to self.plot_num.  When
        overlay_mt_obj is given, its impedance is drawn on the same axes with
        half-size markers for comparison.
        """
        if overlay_mt_obj is None:  # original case, no overlay edis
            Z2 = None
        else:
            Z2 = overlay_mt_obj.Z
        # map integer log10(period) tick values to LaTeX exponent labels
        label_dict = dict([(ii, '$10^{' + str(ii) + '}$') for ii in range(-20, 21)])
        # colorbar label for each supported phase-tensor color-by quantity
        ckdict = {'phiminang': r'$\Phi_{min}$ (deg)',
                  'phimin': r'$\Phi_{min}$ (deg)',
                  'phimaxang': r'$\Phi_{max}$ (deg)',
                  'phimax': r'$\Phi_{max}$ (deg)',
                  'phidet': r'Det{$\Phi$} (deg)',
                  'skew': r'Skew (deg)',
                  'normalized_skew': r'Normalized Skew (deg)',
                  'ellipticity': r'Ellipticity',
                  'skew_seg': r'Skew (deg)',
                  'normalized_skew_seg': r'Normalized Skew (deg)',
                  'geometric_mean': r'$\sqrt{\Phi_{min} \cdot \Phi_{max}}$'}
        # disable the tipper panel when there is no (or all-zero) tipper data
        if self.plot_tipper.find('y') == 0:
            if self.Tipper is None or np.all(self.Tipper.tipper == 0 + 0j):
                print('No Tipper data for station {0}'.format(self.station))
                self.plot_tipper = 'n'
        # disable the phase-tensor panel when no PT object was supplied
        if self.plot_pt == 'y':
            # if np.all(self.Z.z == 0 + 0j) or self.Z is None:
            if self.pt is None:  # no phase tensor object provided
                # NOTE(review): message says "Tipper" but this branch concerns
                # the phase tensor -- looks like a copy-paste; left unchanged
                print('No Tipper data for station {0}'.format(self.station))
                self.plot_pt = 'n'
        # set x-axis limits from short period to long period, rounded to
        # whole decades
        if self.x_limits is None:
            self.x_limits = (10 ** (np.floor(np.log10(self.period.min()))),
                             10 ** (np.ceil(np.log10((self.period.max())))))
        if self.phase_limits is None:
            pass  # auto-detected further below, after the phase is plotted
        # resistivity limits: decade-rounded envelope of the xy/yx data
        if self.res_limits is None:
            self.res_limits = (10 ** (np.floor(
                np.log10(min([np.nanmin(self.Z.res_xy),
                              np.nanmin(self.Z.res_yx)])))),
                               10 ** (np.ceil(
                                   np.log10(max([np.nanmax(self.Z.res_xy),
                                                 np.nanmax(self.Z.res_yx)])))))
        # set some parameters of the figure and subplot spacing
        # NOTE: mutates global matplotlib rcParams for subsequent figures too
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.bottom'] = .1
        plt.rcParams['figure.subplot.top'] = .93
        plt.rcParams['figure.subplot.left'] = .80
        plt.rcParams['figure.subplot.right'] = .98
        # set the font properties for the axis labels
        fontdict = {'size': self.font_size + 2, 'weight': 'bold'}
        # create a dictionary for the number of subplots needed; row index of
        # each panel in the grid
        pdict = {'res': 0,
                 'phase': 1}
        # start the index at 2 because resistivity and phase is permanent for
        # now
        index = 2
        if self.plot_tipper.find('y') >= 0:
            pdict['tip'] = index
            index += 1
        if self.plot_pt.find('y') >= 0:
            pdict['pt'] = index
            index += 1
        # get number of rows needed
        nrows = index
        # set height ratios of the subplots: res tallest, phase next, then
        # one unit per extra panel
        hr = [2, 1.5] + [1] * (len(list(pdict.keys())) - 2)
        # create a grid to place the figures into, set to have 2 rows and 2
        # columns to put any of the 4 components. Make the phase plot
        # slightly shorter than the apparent resistivity plot and have the two
        # close to eachother vertically.  If there is tipper add a 3rd row and
        # if there is strike add another row
        gs = gridspec.GridSpec(nrows, 2, height_ratios=hr, hspace=.05)
        # make figure instance
        self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
        # --> make figure for xy,yx components
        if self.plot_num == 1 or self.plot_num == 3:
            # set label coordinates
            labelcoords = (-0.075, 0.5)
            # space out the subplots
            gs.update(hspace=.05, wspace=.15, left=.1)
            # --> create the axes instances
            # apparent resistivity axis (spans both grid columns)
            self.axr = self.fig.add_subplot(gs[0, :])
            # phase axis that shares period axis with resistivity
            self.axp = self.fig.add_subplot(gs[1, :], sharex=self.axr)
        # --> make figure for all 4 components (xx/yy go in the right column)
        elif self.plot_num == 2:
            # set label coordinates
            labelcoords = (-0.095, 0.5)
            # space out the subplots
            gs.update(hspace=.05, wspace=.15, left=.07)
            # --> create the axes instances
            # apparent resistivity axis
            self.axr = self.fig.add_subplot(gs[0, 0])
            # phase axis that shares period axis with resistivity
            self.axp = self.fig.add_subplot(gs[1, 0], sharex=self.axr)
        # place y coordinate labels in the same location
        self.axr.yaxis.set_label_coords(labelcoords[0], labelcoords[1])
        self.axp.yaxis.set_label_coords(labelcoords[0], labelcoords[1])
        # --> plot tipper axis; KeyError means no tipper panel was scheduled
        try:
            self.axt = self.fig.add_subplot(gs[pdict['tip'], :], )
            self.axt.yaxis.set_label_coords(labelcoords[0], labelcoords[1])
        except KeyError:
            pass
        # --> plot phase tensors axis; KeyError means no PT panel scheduled
        try:
            # can't share axis because not on the same scale
            self.axpt = self.fig.add_subplot(gs[pdict['pt'], :],
                                             aspect='equal')
            self.axpt.yaxis.set_label_coords(labelcoords[0], labelcoords[1])
        except KeyError:
            pass
        # indices of non-zero impedance entries so dead points are skipped
        nz_xx = np.nonzero(self.Z.z[:, 0, 0])
        nz_xy = np.nonzero(self.Z.z[:, 0, 1])
        nz_yx = np.nonzero(self.Z.z[:, 1, 0])
        nz_yy = np.nonzero(self.Z.z[:, 1, 1])
        if self.Tipper is not None:  # fix github issue #24.
            # NOTE the following lines seems not have any effect anyway
            nz_tx = np.nonzero(self.Tipper.tipper[:, 0, 0])
            nz_ty = np.nonzero(self.Tipper.tipper[:, 0, 1])
        # ---------plot the apparent resistivity--------------------------------
        # --> plot as error bars and just as points xy, yx
        # res_xy
        self.ebxyr = self.axr.errorbar(self.period[nz_xy],
                                       self.Z.res_xy[nz_xy],
                                       marker=self.xy_marker,
                                       ms=self.marker_size,
                                       mew=self.lw,
                                       mec=self.xy_color,
                                       color=self.xy_color,
                                       ecolor=self.xy_color,
                                       ls=self.xy_ls,
                                       lw=self.lw,
                                       yerr=self.Z.res_err_xy[nz_xy],
                                       capsize=self.marker_size,
                                       capthick=self.lw)
        # FZ: overlay edi logic -- second impedance drawn with smaller markers
        if (Z2 is not None):
            self.ebxyr2 = self.axr.errorbar(self.period[nz_xy],
                                            Z2.res_xy[nz_xy],
                                            marker=self.xy_marker,
                                            ms=0.5 * self.marker_size,
                                            mew=self.lw,
                                            mec=self.xy_color,  # (0.5,0.5,0.9),
                                            color=self.xy_color,
                                            ecolor=self.xy_color,
                                            ls=self.xy_ls,
                                            lw=0.5 * self.lw,
                                            yerr=Z2.res_err_xy[nz_xy],
                                            capsize=self.marker_size,
                                            capthick=self.lw)
        # res_yx
        self.ebyxr = self.axr.errorbar(self.period[nz_yx],
                                       self.Z.res_yx[nz_yx],
                                       marker=self.yx_marker,
                                       ms=self.marker_size,
                                       mew=self.lw,
                                       mec=self.yx_color,
                                       color=self.yx_color,
                                       ecolor=self.yx_color,
                                       ls=self.yx_ls,
                                       lw=self.lw,
                                       yerr=self.Z.res_err_yx[nz_yx],
                                       capsize=self.marker_size,
                                       capthick=self.lw)
        if (Z2 is not None):
            self.ebyxr2 = self.axr.errorbar(self.period[nz_yx],
                                            Z2.res_yx[nz_yx],
                                            marker=self.yx_marker,
                                            ms=0.5 * self.marker_size,
                                            mew=self.lw,
                                            mec=self.yx_color,
                                            color=self.yx_color,
                                            ecolor=self.yx_color,
                                            ls=self.yx_ls,
                                            lw=self.lw,
                                            yerr=Z2.res_err_yx[nz_yx],
                                            capsize=0.5 * self.marker_size,
                                            capthick=self.lw)
        # --> set axes properties
        plt.setp(self.axr.get_xticklabels(), visible=False)
        self.axr.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
                            fontdict=fontdict)
        self.axr.set_yscale('log', nonposy='clip')
        self.axr.set_xscale('log', nonposx='clip')
        self.axr.set_xlim(self.x_limits)
        self.axr.set_ylim(self.res_limits)
        self.axr.grid(True, alpha=.25, which='both', color=(.25, .25, .25),
                      lw=.25)
        self.axr.legend((self.ebxyr[0], self.ebyxr[0]),
                        ('$Z_{xy}$', '$Z_{yx}$'),
                        loc=3,
                        markerscale=1,
                        borderaxespad=.01,
                        labelspacing=.07,
                        handletextpad=.2,
                        borderpad=.02)
        if Z2 is not None:
            # NOTE(review): ebxyr2/ebyxr2 are passed whole (no [0]) unlike the
            # other ErrorbarContainer handles -- verify the legend renders as
            # intended
            self.axr.legend((self.ebxyr[0], self.ebyxr[0], self.ebxyr2, self.ebyxr2),
                            ('$Z_{xy}$', '$Z_{yx}$', '$Z2_{xy}$', '$Z2_{yx}$'),
                            loc=3,
                            markerscale=1,
                            borderaxespad=.01,
                            labelspacing=.07,
                            handletextpad=.2,
                            borderpad=.02)
        # -----Plot the phase---------------------------------------------------
        # phase_xy
        self.ebxyp = self.axp.errorbar(self.period[nz_xy],
                                       self.Z.phase_xy[nz_xy],
                                       marker=self.xy_marker,
                                       ms=self.marker_size,
                                       mew=self.lw,
                                       mec=self.xy_color,
                                       color=self.xy_color,
                                       ecolor=self.xy_color,
                                       ls=self.xy_ls,
                                       lw=self.lw,
                                       yerr=self.Z.phase_err_xy[nz_xy],
                                       capsize=self.marker_size,
                                       capthick=self.lw)
        # phase_yx: Note add 180 to place it in same quadrant as phase_xy
        self.ebyxp = self.axp.errorbar(self.period[nz_yx],
                                       self.Z.phase_yx[nz_yx] + 180,
                                       marker=self.yx_marker,
                                       ms=self.marker_size,
                                       mew=self.lw,
                                       mec=self.yx_color,
                                       color=self.yx_color,
                                       ecolor=self.yx_color,
                                       ls=self.yx_ls,
                                       lw=self.lw,
                                       yerr=self.Z.phase_err_yx[nz_yx],
                                       capsize=self.marker_size,
                                       capthick=self.lw)
        # check the phase to see if any point are outside of [0:90] and
        # widen the auto limits accordingly
        if self.phase_limits is None:
            if min(self.Z.phase_xy) < 0 or min(self.Z.phase_yx + 180) < 0:
                pymin = min([min(self.Z.phase_xy), min(self.Z.phase_yx)])
                if pymin > 0:
                    pymin = 0
            else:
                pymin = 0
            if max(self.Z.phase_xy) > 90 or max(self.Z.phase_yx + 180) > 90:
                pymax = min([max(self.Z.phase_xy), max(self.Z.phase_yx + 180)])
                if pymax < 91:
                    pymax = 89.9
            else:
                pymax = 89.9
            self.phase_limits = (pymin, pymax)
        # --> set axes properties
        self.axp.set_xlabel('Period (s)', fontdict)
        self.axp.set_ylabel('Phase (deg)', fontdict)
        self.axp.set_xscale('log', nonposx='clip')
        self.axp.set_ylim(self.phase_limits)
        self.axp.yaxis.set_major_locator(MultipleLocator(15))
        self.axp.yaxis.set_minor_locator(MultipleLocator(5))
        self.axp.grid(True, alpha=.25, which='both', color=(.25, .25, .25),
                      lw=.25)
        # set th xaxis tick labels to invisible when a lower panel follows
        if self.plot_tipper.find('y') >= 0 or self.plot_pt == 'y':
            plt.setp(self.axp.xaxis.get_ticklabels(), visible=False)
            self.axp.set_xlabel('')
        # -----plot tipper----------------------------------------------------
        if self.plot_tipper.find('y') == 0:
            # arrow components; arrow_direction flips them by adding pi
            txr = self.Tipper.mag_real * np.sin(self.Tipper.angle_real * np.pi / 180 + \
                                                np.pi * self.arrow_direction)
            tyr = self.Tipper.mag_real * np.cos(self.Tipper.angle_real * np.pi / 180 + \
                                                np.pi * self.arrow_direction)
            txi = self.Tipper.mag_imag * np.sin(self.Tipper.angle_imag * np.pi / 180 + \
                                                np.pi * self.arrow_direction)
            tyi = self.Tipper.mag_imag * np.cos(self.Tipper.angle_imag * np.pi / 180 + \
                                                np.pi * self.arrow_direction)
            nt = len(txr)
            tiplist = []
            tiplabel = []
            for aa in range(nt):
                # x-extent scaled by log10(period) since the axis is in
                # log-period units
                xlenr = txr[aa] * np.log10(self.period[aa])
                xleni = txi[aa] * np.log10(self.period[aa])
                # --> plot real arrows
                if self.plot_tipper.find('r') > 0:
                    self.axt.arrow(np.log10(self.period[aa]),
                                   0,
                                   xlenr,
                                   tyr[aa],
                                   lw=self.arrow_lw,
                                   facecolor=self.arrow_color_real,
                                   edgecolor=self.arrow_color_real,
                                   head_width=self.arrow_head_width,
                                   head_length=self.arrow_head_length,
                                   length_includes_head=False)
                    # dummy zero-length line used only as a legend handle
                    if aa == 0:
                        line1 = self.axt.plot(0, 0, self.arrow_color_real)
                        tiplist.append(line1[0])
                        tiplabel.append('real')
                # --> plot imaginary arrows
                if self.plot_tipper.find('i') > 0:
                    self.axt.arrow(np.log10(self.period[aa]),
                                   0,
                                   xleni,
                                   tyi[aa],
                                   lw=self.arrow_lw,
                                   facecolor=self.arrow_color_imag,
                                   edgecolor=self.arrow_color_imag,
                                   head_width=self.arrow_head_width,
                                   head_length=self.arrow_head_length,
                                   length_includes_head=False)
                    if aa == 0:
                        line2 = self.axt.plot(0, 0, self.arrow_color_imag)
                        tiplist.append(line2[0])
                        tiplabel.append('imag')
            # make a line at 0 for reference
            self.axt.plot(np.log10(self.period), [0] * nt, 'k', lw=.5)
            self.axt.legend(tiplist, tiplabel,
                            loc='upper left',
                            markerscale=1,
                            borderaxespad=.01,
                            labelspacing=.07,
                            handletextpad=.2,
                            borderpad=.1,
                            prop={'size': self.font_size})
            # set axis properties
            self.axt.set_xlim(np.log10(self.x_limits[0]),
                              np.log10(self.x_limits[1]))
            # relabel the linear log10(period) ticks as 10^n
            tklabels = []
            xticks = []
            for tk in self.axt.get_xticks():
                try:
                    tklabels.append(label_dict[tk])
                    xticks.append(tk)
                except KeyError:
                    pass
            self.axt.set_xticks(xticks)
            self.axt.set_xticklabels(tklabels,
                                     fontdict={'size': self.font_size})
            self.axt.set_xlabel('Period (s)', fontdict=fontdict)
            # need to reset the x_limits caouse they get reset when calling
            # set_ticks for some reason
            self.axt.set_xlim(np.log10(self.x_limits[0]),
                              np.log10(self.x_limits[1]))
            self.axt.yaxis.set_major_locator(MultipleLocator(.2))
            self.axt.yaxis.set_minor_locator(MultipleLocator(.1))
            # NOTE(review): xlabel is set a second time here -- redundant but
            # harmless
            self.axt.set_xlabel('Period (s)', fontdict=fontdict)
            self.axt.set_ylabel('Tipper', fontdict=fontdict)
            # self.axt.set_xscale('log', nonposx='clip')
            # clamp auto y-limits to roughly [-1, 1]
            if self.tipper_limits is None:
                tmax = max([np.nanmax(tyr), np.nanmax(tyi)])
                if tmax > 1:
                    tmax = .899
                tmin = min([np.nanmin(tyr), np.nanmin(tyi)])
                if tmin < -1:
                    tmin = -.899
                self.tipper_limits = (tmin - .1, tmax + .1)
            self.axt.set_ylim(self.tipper_limits)
            self.axt.grid(True, alpha=.25, which='both', color=(.25, .25, .25),
                          lw=.25)
            # set th xaxis tick labels to invisible when PT panel follows
            if self.plot_pt == 'y':
                plt.setp(self.axt.xaxis.get_ticklabels(), visible=False)
                self.axt.set_xlabel('')
        # ----plot phase tensor ellipse---------------------------------------
        if self.plot_pt == 'y':
            cmap = self.ellipse_cmap
            ckmin = self.ellipse_range[0]
            ckmax = self.ellipse_range[1]
            try:
                ckstep = float(self.ellipse_range[2])
            except IndexError:
                ckstep = 3
            # segmented colormap needs explicit boundaries
            if cmap == 'mt_seg_bl2wh2rd':
                bounds = np.arange(ckmin, ckmax + ckstep, ckstep)
                nseg = float((ckmax - ckmin) / (2 * ckstep))
            # get the properties to color the ellipses by
            if self.ellipse_colorby == 'phiminang' or \
                    self.ellipse_colorby == 'phimin':
                colorarray = self.pt.phimin
            elif self.ellipse_colorby == 'phimaxang' or \
                    self.ellipse_colorby == 'phimax':
                colorarray = self.pt.phimax
            elif self.ellipse_colorby == 'phidet':
                colorarray = np.sqrt(abs(self.pt.det)) * (180 / np.pi)
            elif self.ellipse_colorby == 'skew' or \
                    self.ellipse_colorby == 'skew_seg':
                colorarray = self.pt.beta
            elif self.ellipse_colorby == 'ellipticity':
                colorarray = self.pt.ellipticity
            else:
                raise NameError(self.ellipse_colorby + ' is not supported')
            # -------------plot ellipses-----------------------------------
            for ii, ff in enumerate(self.period):
                # make sure the ellipses will be visable
                eheight = self.pt.phimin[ii] / self.pt.phimax[ii] * \
                    self.ellipse_size
                # NOTE(review): phimax/phimax is identically 1, so ewidth is
                # always ellipse_size -- possibly intended as a max over all
                # periods; left unchanged
                ewidth = self.pt.phimax[ii] / self.pt.phimax[ii] * \
                    self.ellipse_size
                # create an ellipse scaled by phimin and phimax and oriented
                # along the azimuth which is calculated as clockwise but needs
                # to be plotted counter-clockwise hence the negative sign.
                ellipd = patches.Ellipse((np.log10(ff) * self.ellipse_spacing,
                                          0),
                                         width=ewidth,
                                         height=eheight,
                                         angle=90 - self.pt.azimuth[ii])
                self.axpt.add_patch(ellipd)
                # get ellipse color
                if cmap.find('seg') > 0:
                    ellipd.set_facecolor(mtcl.get_plot_color(colorarray[ii],
                                                             self.ellipse_colorby,
                                                             cmap,
                                                             ckmin,
                                                             ckmax,
                                                             bounds=bounds))
                else:
                    ellipd.set_facecolor(mtcl.get_plot_color(colorarray[ii],
                                                             self.ellipse_colorby,
                                                             cmap,
                                                             ckmin,
                                                             ckmax))
            # ----set axes properties-----------------------------------------------
            # --> set tick labels and limits
            self.axpt.set_xlim(np.log10(self.x_limits[0]),
                               np.log10(self.x_limits[1]))
            tklabels = []
            xticks = []
            for tk in self.axpt.get_xticks():
                try:
                    tklabels.append(label_dict[tk])
                    xticks.append(tk)
                except KeyError:
                    pass
            self.axpt.set_xticks(xticks)
            self.axpt.set_xticklabels(tklabels,
                                      fontdict={'size': self.font_size})
            self.axpt.set_xlabel('Period (s)', fontdict=fontdict)
            self.axpt.set_ylim(ymin=-1.5 * self.ellipse_size,
                               ymax=1.5 * self.ellipse_size)
            # need to reset the x_limits caouse they get reset when calling
            # set_ticks for some reason
            self.axpt.set_xlim(np.log10(self.x_limits[0]),
                               np.log10(self.x_limits[1]))
            self.axpt.grid(True,
                           alpha=.25,
                           which='major',
                           color=(.25, .25, .25),
                           lw=.25)
            plt.setp(self.axpt.get_yticklabels(), visible=False)
            if pdict['pt'] != nrows - 1:
                plt.setp(self.axpt.get_xticklabels(), visible=False)
            # add colorbar for PT, positioned just left of the PT axes
            axpos = self.axpt.get_position()
            cb_position = (axpos.bounds[0] - .0575,
                           axpos.bounds[1] + .02,
                           .01,
                           axpos.bounds[3] * .75)
            self.cbax = self.fig.add_axes(cb_position)
            if cmap == 'mt_seg_bl2wh2rd':
                # make a color list
                clist = [(cc, cc, 1)
                         for cc in np.arange(0, 1 + 1. / (nseg), 1. / (nseg))] + \
                        [(1, cc, cc)
                         for cc in np.arange(1, -1. / (nseg), -1. / (nseg))]
                # make segmented colormap
                mt_seg_bl2wh2rd = colors.ListedColormap(clist)
                # make bounds so that the middle is white
                bounds = np.arange(ckmin - ckstep, ckmax + 2 * ckstep, ckstep)
                # normalize the colors
                norms = colors.BoundaryNorm(bounds, mt_seg_bl2wh2rd.N)
                # make the colorbar
                self.cbpt = mcb.ColorbarBase(self.cbax,
                                             cmap=mt_seg_bl2wh2rd,
                                             norm=norms,
                                             orientation='vertical',
                                             ticks=bounds[1:-1])
            else:
                self.cbpt = mcb.ColorbarBase(self.cbax,
                                             cmap=mtcl.cmapdict[cmap],
                                             norm=colors.Normalize(vmin=ckmin,
                                                                   vmax=ckmax),
                                             orientation='vertical')
                self.cbpt.set_ticks([ckmin, (ckmax - ckmin) / 2, ckmax])
                self.cbpt.set_ticklabels(['{0:.0f}'.format(ckmin),
                                          '{0:.0f}'.format((ckmax - ckmin) / 2),
                                          '{0:.0f}'.format(ckmax)])
            self.cbpt.ax.yaxis.set_label_position('left')
            self.cbpt.ax.yaxis.set_label_coords(-1.05, .5)
            self.cbpt.ax.yaxis.tick_right()
            self.cbpt.ax.tick_params(axis='y', direction='in')
            self.cbpt.set_label(ckdict[self.ellipse_colorby],
                                fontdict={'size': self.font_size})
        # ===Plot the xx, yy components if desired==============================
        if self.plot_num == 2:
            # ---------plot the apparent resistivity----------------------------
            self.axr2 = self.fig.add_subplot(gs[0, 1], sharex=self.axr)
            self.axr2.yaxis.set_label_coords(-.1, 0.5)
            # res_xx
            self.ebxxr = self.axr2.errorbar(self.period[nz_xx],
                                            self.Z.res_xx[nz_xx],
                                            marker=self.xy_marker,
                                            ms=self.marker_size,
                                            mew=self.lw,
                                            mec=self.xy_color,
                                            color=self.xy_color,
                                            ecolor=self.xy_color,
                                            ls=self.xy_ls,
                                            lw=self.lw,
                                            yerr=self.Z.res_err_xx[nz_xx],
                                            capsize=self.marker_size,
                                            capthick=self.lw)
            # res_yy
            self.ebyyr = self.axr2.errorbar(self.period[nz_yy],
                                            self.Z.res_yy[nz_yy],
                                            marker=self.yx_marker,
                                            ms=self.marker_size,
                                            mew=self.lw,
                                            mec=self.yx_color,
                                            color=self.yx_color,
                                            ecolor=self.yx_color,
                                            ls=self.yx_ls,
                                            lw=self.lw,
                                            yerr=self.Z.res_err_yy[nz_yy],
                                            capsize=self.marker_size,
                                            capthick=self.lw)
            if Z2 is not None:
                # res_xx of Z2, with smaller marker size
                self.ebxxr2 = self.axr2.errorbar(self.period[nz_xx],
                                                 Z2.res_xx[nz_xx],
                                                 marker=self.xy_marker,
                                                 ms=0.5 * self.marker_size,
                                                 mew=self.lw,
                                                 mec=self.xy_color,
                                                 color=self.xy_color,
                                                 ecolor=self.xy_color,
                                                 ls=self.xy_ls,
                                                 lw=self.lw,
                                                 yerr=Z2.res_err_xx[nz_xx],
                                                 capsize=0.5 * self.marker_size,
                                                 capthick=self.lw)
                # res_yy
                self.ebyyr2 = self.axr2.errorbar(self.period[nz_yy],
                                                 Z2.res_yy[nz_yy],
                                                 marker=self.yx_marker,
                                                 ms=0.5 * self.marker_size,
                                                 mew=self.lw,
                                                 mec=self.yx_color,
                                                 color=self.yx_color,
                                                 ecolor=self.yx_color,
                                                 ls=self.yx_ls,
                                                 lw=self.lw,
                                                 yerr=Z2.res_err_yy[nz_yy],
                                                 capsize=0.5 * self.marker_size,
                                                 capthick=self.lw)
            # --> set axes properties
            plt.setp(self.axr2.get_xticklabels(), visible=False)
            self.axr2.set_yscale('log', nonposy='clip')
            self.axr2.set_xscale('log', nonposx='clip')
            self.axr2.set_xlim(self.x_limits)
            self.axr2.grid(True, alpha=.25,
                           which='both',
                           color=(.25, .25, .25),
                           lw=.25)
            if Z2 is None:
                self.axr2.legend((self.ebxxr[0], self.ebyyr[0]),
                                 ('$Z_{xx}$', '$Z_{yy}$'),
                                 loc=3, markerscale=1,
                                 borderaxespad=.01,
                                 labelspacing=.07,
                                 handletextpad=.2,
                                 borderpad=.02)
            else:
                self.axr2.legend((self.ebxxr[0], self.ebyyr[0], self.ebxxr2[0], self.ebyyr2[0]),
                                 ('$Z_{xx}$', '$Z_{yy}$', '$Z2_{xx}$', '$Z2_{yy}$'),
                                 loc=3, markerscale=1,
                                 borderaxespad=.01,
                                 labelspacing=.07,
                                 handletextpad=.2,
                                 borderpad=.02)
            # -----Plot the phase-----------------------------------------------
            self.axp2 = self.fig.add_subplot(gs[1, 1], sharex=self.axr)
            self.axp2.yaxis.set_label_coords(-.1, 0.5)
            # phase_xx
            self.ebxxp = self.axp2.errorbar(self.period[nz_xx],
                                            self.Z.phase_xx[nz_xx],
                                            marker=self.xy_marker,
                                            ms=self.marker_size,
                                            mew=self.lw,
                                            mec=self.xy_color,
                                            color=self.xy_color,
                                            ecolor=self.xy_color,
                                            ls=self.xy_ls,
                                            lw=self.lw,
                                            yerr=self.Z.phase_err_xx[nz_xx],
                                            capsize=self.marker_size,
                                            capthick=self.lw)
            # phase_yy
            self.ebyyp = self.axp2.errorbar(self.period[nz_yy],
                                            self.Z.phase_yy[nz_yy],
                                            marker=self.yx_marker,
                                            ms=self.marker_size,
                                            mew=self.lw,
                                            mec=self.yx_color,
                                            color=self.yx_color,
                                            ecolor=self.yx_color,
                                            ls=self.yx_ls,
                                            lw=self.lw,
                                            yerr=self.Z.phase_err_yy[nz_yy],
                                            capsize=self.marker_size,
                                            capthick=self.lw)
            # --> set axes properties; diagonal phases are left unwrapped
            self.axp2.set_xlabel('Period (s)', fontdict)
            self.axp2.set_xscale('log', nonposx='clip')
            self.axp2.set_ylim(ymin=-179.9, ymax=179.9)
            self.axp2.yaxis.set_major_locator(MultipleLocator(30))
            self.axp2.yaxis.set_minor_locator(MultipleLocator(5))
            self.axp2.grid(True, alpha=.25,
                           which='both',
                           color=(.25, .25, .25),
                           lw=.25)
            # hide x labels when tipper/PT panels sit below this row
            if len(list(pdict.keys())) > 2:
                plt.setp(self.axp2.xaxis.get_ticklabels(), visible=False)
                plt.setp(self.axp2.xaxis.get_label(), visible=False)
        # ===Plot the Determinant if desired==================================
        if self.plot_num == 3:
            # res_det
            self.ebdetr = self.axr.errorbar(self.period,
                                            self.Z.res_det,
                                            marker=self.det_marker,
                                            ms=self.marker_size,
                                            mew=self.lw,
                                            mec=self.det_color,
                                            color=self.det_color,
                                            ecolor=self.det_color,
                                            ls=self.det_ls,
                                            lw=self.lw,
                                            yerr=self.Z.res_det_err,
                                            capsize=self.marker_size,
                                            capthick=self.lw)
            if Z2 is not None:
                self.ebdetr2 = self.axr.errorbar(self.period,
                                                 Z2.res_det,
                                                 marker=self.det_marker,
                                                 ms=0.5 * self.marker_size,
                                                 mew=self.lw,
                                                 mec=self.det_color,
                                                 color=self.det_color,
                                                 ecolor=self.det_color,
                                                 ls=self.det_ls,
                                                 lw=self.lw,
                                                 yerr=Z2.res_det_err,
                                                 capsize=0.5 * self.marker_size,
                                                 capthick=self.lw)
            # phase_det
            self.ebdetp = self.axp.errorbar(self.period,
                                            self.Z.phase_det,
                                            marker=self.det_marker,
                                            ms=self.marker_size,
                                            mew=self.lw,
                                            mec=self.det_color,
                                            color=self.det_color,
                                            ecolor=self.det_color,
                                            ls=self.det_ls,
                                            lw=self.lw,
                                            yerr=self.Z.phase_det_err,
                                            capsize=self.marker_size,
                                            capthick=self.lw)
            self.axr.legend((self.ebxyr[0], self.ebyxr[0], self.ebdetr[0]),
                            ('$Z_{xy}$', '$Z_{yx}$', '$\det(\mathbf{\hat{Z}})$'),
                            loc=3,
                            markerscale=1,
                            borderaxespad=.01,
                            labelspacing=.07,
                            handletextpad=.2,
                            borderpad=.02)
            if Z2 is not None:
                self.axr.legend((self.ebxyr[0], self.ebyxr[0], self.ebdetr[0], self.ebxyr2[0], self.ebyxr2[0], self.ebdetr2[0],),
                                ('$Z_{xy}$', '$Z_{yx}$', '$\det(\mathbf{\hat{Z}})$', '$Z2_{xy}$', '$Z2_{yx}$', '$\det(\mathbf{\hat{Z2}})$'),
                                loc=3,
                                markerscale=1,
                                borderaxespad=.01,
                                labelspacing=.07,
                                handletextpad=.2,
                                borderpad=.02)
        # re-enable x tick labels on the res/phase rows if requested
        if self.show_resphase_xticklabels:
            if self.plot_num in [1, 3]:
                gs.update(hspace=0.2, wspace=.15, left=.1)
            else:
                gs.update(hspace=0.2, wspace=.15, left=.07)
                # axr2/axp2 only exist for plot_num == 2
                plt.setp(self.axp2.xaxis.get_ticklabels(), visible=True)
                plt.setp(self.axr2.xaxis.get_ticklabels(), visible=True)
                self.axr2.tick_params(axis='x', pad=2, direction='in', which='both', labelsize=self.font_size - 1)
                self.axp2.tick_params(axis='x', pad=2, direction='in', which='both', labelsize=self.font_size - 1)
                self.axp2.set_xlabel('Period (s)', fontsize=self.font_size - 1, labelpad=0)  #
            plt.setp(self.axr.xaxis.get_ticklabels(), visible=True)
            plt.setp(self.axp.xaxis.get_ticklabels(), visible=True)
            self.axr.tick_params(axis='x', pad=2, direction='in', which='both', labelsize=self.font_size - 1)
            self.axp.tick_params(axis='x', pad=2, direction='in', which='both', labelsize=self.font_size - 1)
            # self.axp.set_xlabel('Period (s)',fontsize=self.font_size-2,labelpad=0)
        # make plot_title and show
        if self.plot_title is None:
            self.plot_title = self.station
        self.fig.suptitle(self.plot_title, fontdict=fontdict)
        # be sure to show
        if show:
            plt.show()
def save_plot(self, save_fn, file_format='pdf', orientation='portrait',
fig_dpi=None, close_plot='y'):
"""
save_plot will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_ResPhase.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the fig_dpi will be that at
which the figure was made. I don't think that
it can be larger than fig_dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> p1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi is None:
fig_dpi = self.fig_dpi
if not os.path.isdir(save_fn):
file_format = save_fn[-3:]
else:
save_fn = os.path.join(save_fn, self.station + '_ResPhase.' +
file_format)
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation)
if close_plot == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print('Saved figure to: ' + self.fig_fn)
def update_plot(self):
"""
update any parameters that where changed using the built-in draw from
canvas.
Use this if you change an of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> [ax.grid(True, which='major') for ax in [p1.axr,p1.axp]]
>>> p1.update_plot()
"""
self.fig.canvas.draw()
def redraw_plot(self):
"""
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> p1.xy_color = (.5,.5,.9)
>>> p1.xy_marker = '*'
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return "Plots Resistivity and phase for the different modes of the" + \
"MT response."
#########################################################
# Plot the data from one or two EDI files.
# python /c/Githubz/mtpy/mtpy/imaging/plot_mt_response.py /c/Githubz/mtpy/data/edifiles/15125A.edi
# OR
# python /c/Githubz/mtpy/mtpy/imaging/plot_mt_response.py /c/Githubz/mtpy/data/edifiles/15125A.edi /c/Githubz/mtpy/data/edifiles/15129A.edi
####################################################################
if __name__ == "__main__":
    # Command-line driver: plot one EDI file, or overlay a second on the
    # first when two paths are given.
    import sys
    from mtpy.core.mt import MT
    n_args = len(sys.argv)
    if n_args < 2:
        print("USAGE: python %s edifile1 [edifile2]"%sys.argv[0])
        sys.exit(1)
    elif n_args == 2:
        # Single EDI file provided.
        mt_obj = MT(sys.argv[1])
        rp1 = PlotMTResponse(z_object=mt_obj.Z,  # this is mandatory
                             station=mt_obj.station,
                             plot_tipper='yr',  # real part of the tipper
                             plot_num=3)  # 1: xy + yx; 2: all; 3: xy yx det
    elif n_args == 3:
        # Two EDI files provided: overlay the second response.
        mt_obj = MT(sys.argv[1])
        mt_obj2 = MT(sys.argv[2])
        rp1 = PlotMTResponse(z_object=mt_obj.Z,  # this is mandatory
                             station=mt_obj.station,
                             plot_yn='n',
                             plot_tipper='yr',  # real part of the tipper
                             plot_num=3)  # 1: xy + yx; 2: all; 3: xy yx det
        rp1.station = rp1.station + " and " + mt_obj2.station
        rp1.plot(overlay_mt_obj=mt_obj2)
| gpl-3.0 |
empeeu/numpy | numpy/core/tests/test_multiarray.py | 2 | 218049 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import os
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.core import *
from numpy.compat import asbytes, getexception, strchar, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
# EMPTY: expected representation of empty shape/strides/suboffsets —
# presumably consumed by buffer-interface tests later in this file (not
# visible here).
if sys.version_info[:2] > (3, 2):
    # In Python 3.3 the representation of empty shape, strides and suboffsets
    # is an empty tuple instead of None.
    # http://docs.python.org/dev/whatsnew/3.3.html#api-changes
    EMPTY = ()
else:
    EMPTY = None
class TestFlags(TestCase):
    """Tests for the ndarray ``flags`` attribute (writeability, contiguity,
    alignment)."""
    def setUp(self):
        # Fresh 1-D base array for every test.
        self.a = arange(10)
    def test_writeable(self):
        # runstring executes the statement with this frame's locals, so the
        # writes below go through the normal item-assignment path.
        mydict = locals()
        self.a.flags.writeable = False
        self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
        self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        self.a.flags.writeable = True
        self.a[0] = 5
        self.a[0] = 0
    def test_otherflags(self):
        # A freshly created 1-D arange is C- and F-behaved and owns its data.
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        assert_equal(self.a.flags.updateifcopy, False)
    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed bytewise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
    def test_void_align(self):
        # Structured (void) dtypes of aligned fields report aligned too.
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)
class TestHash(TestCase):
    # see #3793
    def test_int(self):
        """hash() of a numpy integer scalar must equal hash() of the
        equivalent Python int at the type's boundary values."""
        cases = [(np.int8, np.uint8, 8),
                 (np.int16, np.uint16, 16),
                 (np.int32, np.uint32, 32),
                 (np.int64, np.uint64, 64)]
        for signed, unsigned, nbits in cases:
            for i in range(1, nbits):
                # Signed type: check -2**i, 2**(i-1) and 2**i - 1.
                assert_equal(hash(signed(-2**i)), hash(-2**i),
                             err_msg="%r: -2**%d" % (signed, i))
                assert_equal(hash(signed(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (signed, i - 1))
                assert_equal(hash(signed(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (signed, i))
                # Unsigned type: same checks one bit lower (clamped at 1).
                i = max(i - 1, 1)
                assert_equal(hash(unsigned(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (unsigned, i - 1))
                assert_equal(hash(unsigned(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (unsigned, i))
class TestAttributes(TestCase):
    """Tests for basic ndarray attributes: shape, strides, ndim, size,
    itemsize, dtype, plus the stride setter and ``fill``."""
    def setUp(self):
        # 1-D, 2-D and 3-D fixtures.
        self.one = arange(10)
        self.two = arange(20).reshape(4, 5)
        self.three = arange(60, dtype=float64).reshape(2, 5, 6)
    def test_attributes(self):
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4, 5))
        assert_equal(self.three.shape, (2, 5, 6))
        # Shape is assignable in place when sizes are compatible.
        self.three.shape = (10, 3, 2)
        assert_equal(self.three.shape, (10, 3, 2))
        self.three.shape = (2, 5, 6)
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        assert_equal(self.two.base, arange(20))
    def test_dtypeattr(self):
        assert_equal(self.one.dtype, dtype(int_))
        assert_equal(self.three.dtype, dtype(float_))
        # NOTE(review): 'l' assumes the default integer is a C long; this is
        # platform dependent (e.g. differs on Windows) — confirm intent.
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
        self.assertTrue(self.three.dtype.str[0] in '<>')
        assert_equal(self.one.dtype.str[1], 'i')
        assert_equal(self.three.dtype.str[1], 'f')
    def test_int_subclassing(self):
        # Regression test for https://github.com/numpy/numpy/pull/3526
        numpy_int = np.int_(0)
        if sys.version_info[0] >= 3:
            # On Py3k int_ should not inherit from int, because it's not fixed-width anymore
            assert_equal(isinstance(numpy_int, int), False)
        else:
            # Otherwise, it should inherit from int...
            assert_equal(isinstance(numpy_int, int), True)
            # ... and fast-path checks on C-API level should also work
            from numpy.core.multiarray_tests import test_int_subclass
            assert_equal(test_int_subclass(numpy_int), True)
    def test_stridesattr(self):
        x = self.one
        def make_array(size, offset, strides):
            # Build a view into x; offset/strides are in elements, converted
            # to bytes here.
            return ndarray(size, buffer=x, dtype=int,
                           offset=offset*x.itemsize,
                           strides=strides*x.itemsize)
        assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
        # Out-of-bounds combinations must be rejected at construction time.
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(ValueError, make_array, 8, 3, 1)
        assert_equal(make_array(8, 3, 0), np.array([3]*8))
        # Check behavior reported in gh-2503:
        self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3]))
        make_array(0, 0, 10)
    def test_set_stridesattr(self):
        x = self.one
        def make_array(size, offset, strides):
            try:
                r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
            except:
                raise RuntimeError(getexception())
            # NOTE(review): the chained assignment also binds a local
            # 'strides' — looks accidental but is harmless.
            r.strides = strides=strides*x.itemsize
            return r
        assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
        assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9]))
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(RuntimeError, make_array, 8, 3, 1)
        # Check that the true extent of the array is used.
        # Test relies on as_strided base not exposing a buffer.
        x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0))
        def set_strides(arr, strides):
            arr.strides = strides
        self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
        # Test for offset calculations:
        x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
                                            shape=(10,), strides=(-1,))
        self.assertRaises(ValueError, set_strides, x[::-1], -1)
        a = x[::-1]
        a.strides = 1
        a[::2].strides = 2
    def test_fill(self):
        # fill() must match broadcast assignment for every builtin typecode.
        for t in "?bhilqpBHILQPfdgFDGO":
            x = empty((3, 2, 1), t)
            y = empty((3, 2, 1), t)
            x.fill(1)
            y[...] = 1
            assert_equal(x, y)
    def test_fill_max_uint64(self):
        x = empty((3, 2, 1), dtype=uint64)
        y = empty((3, 2, 1), dtype=uint64)
        # Largest uint64 value must survive fill() without truncation.
        value = 2**64 - 1
        y[...] = value
        x.fill(value)
        assert_array_equal(x, y)
    def test_fill_struct_array(self):
        # Filling from a scalar
        x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
        x.fill(x[0])
        assert_equal(x['f1'][1], x['f1'][0])
        # Filling from a tuple that can be converted
        # to a scalar
        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
        x.fill((3.5, -2))
        assert_array_equal(x['a'], [3.5, 3.5])
        assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
    """Tests for np.array() construction from nested sequences, the copy
    flag, and as-contiguous/as-fortran conversion."""
    def test_array(self):
        d = np.ones(6)
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6)))
        d = np.ones(6)
        tgt = np.ones((2, 6))
        r = np.array([d, d])
        assert_equal(r, tgt)
        tgt[1] = 2
        r = np.array([d, d + 1])
        assert_equal(r, tgt)
        d = np.ones(6)
        r = np.array([[d, d]])
        assert_equal(r, np.ones((1, 2, 6)))
        d = np.ones(6)
        r = np.array([[d, d], [d, d]])
        assert_equal(r, np.ones((2, 2, 6)))
        d = np.ones((6, 6))
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6, 6)))
        d = np.ones((6, ))
        # NOTE(review): ragged input — relies on implicit object-array
        # creation, which newer NumPy rejects without dtype=object.
        r = np.array([[d, d + 1], d + 2])
        assert_equal(len(r), 2)
        assert_equal(r[0], [d, d + 1])
        assert_equal(r[1], d + 2)
        # NOTE(review): np.bool is the removed alias of builtin bool in
        # modern NumPy (>=1.24).
        tgt = np.ones((2, 3), dtype=np.bool)
        tgt[0, 2] = False
        tgt[1, 0:2] = False
        r = np.array([[True, True, False], [False, False, True]])
        assert_equal(r, tgt)
        r = np.array([[True, False], [True, False], [False, True]])
        assert_equal(r, tgt.T)
    def test_array_empty(self):
        # np.array() with no argument is an error.
        assert_raises(TypeError, np.array)
    def test_array_copy_false(self):
        # copy=False shares the buffer; writes are visible both ways.
        d = np.array([1, 2, 3])
        e = np.array(d, copy=False)
        d[1] = 3
        assert_array_equal(e, [1, 3, 3])
        e = np.array(d, copy=False, order='F')
        d[1] = 4
        assert_array_equal(e, [1, 4, 3])
        e[2] = 7
        assert_array_equal(d, [1, 4, 7])
    def test_array_copy_true(self):
        # copy=True decouples the result from the source.
        d = np.array([[1,2,3], [1, 2, 3]])
        e = np.array(d, copy=True)
        d[0, 1] = 3
        e[0, 2] = -7
        assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
        assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
        e = np.array(d, copy=True, order='F')
        d[0, 1] = 5
        e[0, 2] = 7
        assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
        assert_array_equal(d, [[1, 5, 3], [1,2,3]])
    def test_array_cont(self):
        # A strided 1-D view is both C- and F-contiguous once copied.
        d = np.ones(10)[::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.ascontiguousarray(d).flags.f_contiguous)
        assert_(np.asfortranarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
        d = np.ones((10, 10))[::2,::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
    """Tests for broadcast assignment into arrays and assignment errors."""
    def test_assignment_broadcasting(self):
        a = np.arange(6).reshape(2, 3)
        # Broadcasting the input to the output
        a[...] = np.arange(3)
        assert_equal(a, [[0, 1, 2], [0, 1, 2]])
        a[...] = np.arange(2).reshape(2, 1)
        assert_equal(a, [[0, 0, 0], [1, 1, 1]])
        # For compatibility with <= 1.5, a limited version of broadcasting
        # the output to the input.
        #
        # This behavior is inconsistent with NumPy broadcasting
        # in general, because it only uses one of the two broadcasting
        # rules (adding a new "1" dimension to the left of the shape),
        # applied to the output instead of an input. In NumPy 2.0, this kind
        # of broadcasting assignment will likely be disallowed.
        # NOTE(review): assigning a (1, 2, 3) source into a (2, 3) target is
        # the deprecated behavior described above; removed in later NumPy.
        a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        # The other type of broadcasting would require a reduction operation.
        def assign(a, b):
            a[...] = b
        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
    def test_assignment_errors(self):
        # Address issue #2276
        class C:
            pass
        a = np.zeros(1)
        def assign(v):
            a[0] = v
        # An unconvertible object must raise, not silently corrupt data.
        assert_raises((AttributeError, TypeError), assign, C())
        assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
    """Basic dtype construction and byte-order (in)equality checks."""
    def test_construction(self):
        # dtype strings and scalar types must yield equal descriptors.
        assert_equal(dtype('i4'), dtype(int32))
        assert_equal(dtype('f8'), dtype(float64))
    def test_byteorders(self):
        # Little- and big-endian variants are distinct dtypes, both as
        # plain types and inside structured dtypes.
        little, big = dtype('<i4'), dtype('>i4')
        self.assertNotEqual(little, big)
        self.assertNotEqual(dtype([('a', '<i4')]), dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
    """Indexing and assignment behavior of 0-d (rank-zero) arrays."""
    def setUp(self):
        # One numeric and one object-dtype 0-d array.
        self.d = array(0), array('x', object)
    def test_ellipsis_subscript(self):
        a, b = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(b[...], 'x')
        # Ellipsis indexing returns a new view, no longer the array itself.
        self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
        self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
    def test_empty_subscript(self):
        # The empty tuple extracts the scalar element.
        a, b = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(b[()], 'x')
        self.assertTrue(type(a[()]) is a.dtype.type)
        self.assertTrue(type(b[()]) is str)
    def test_invalid_subscript(self):
        # Integer and array indices are invalid on a 0-d array.
        a, b = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[0], b)
        self.assertRaises(IndexError, lambda x: x[array([], int)], a)
        self.assertRaises(IndexError, lambda x: x[array([], int)], b)
    def test_ellipsis_subscript_assignment(self):
        a, b = self.d
        a[...] = 42
        self.assertEqual(a, 42)
        b[...] = ''
        self.assertEqual(b.item(), '')
    def test_empty_subscript_assignment(self):
        a, b = self.d
        a[()] = 42
        self.assertEqual(a, 42)
        b[()] = ''
        self.assertEqual(b.item(), '')
    def test_invalid_subscript_assignment(self):
        a, b = self.d
        def assign(x, i, v):
            x[i] = v
        self.assertRaises(IndexError, assign, a, 0, 42)
        self.assertRaises(IndexError, assign, b, 0, '')
        # Assigning a non-numeric value to a numeric 0-d array fails.
        self.assertRaises(ValueError, assign, a, (), '')
    def test_newaxis(self):
        # Each newaxis adds one length-1 dimension.
        a, b = self.d
        self.assertEqual(a[newaxis].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ...].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
        self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
        self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a, b = self.d
        def subscript(x, i): x[i]
        self.assertRaises(IndexError, subscript, a, (newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
    def test_constructor(self):
        # ndarray(()) builds a 0-d array; a second one can share its buffer.
        x = ndarray(())
        x[()] = 5
        self.assertEqual(x[()], 5)
        y = ndarray((), buffer=x)
        y[()] = 6
        self.assertEqual(x[()], 6)
    def test_output(self):
        # A 0-d output cannot receive a broadcast 1-d result.
        x = array(2)
        self.assertRaises(ValueError, add, x, [1], x)
class TestScalarIndexing(TestCase):
    """Indexing of array scalars, plus overlapping slice assignment."""
    def setUp(self):
        # An array scalar (0-d), extracted by integer indexing.
        self.d = array([0, 1])[0]
    def test_ellipsis_subscript(self):
        a = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(a[...].shape, ())
    def test_empty_subscript(self):
        a = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(a[()].shape, ())
    def test_invalid_subscript(self):
        a = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[array([], int)], a)
    def test_invalid_subscript_assignment(self):
        # Array scalars are immutable.
        a = self.d
        def assign(x, i, v):
            x[i] = v
        self.assertRaises(TypeError, assign, a, 0, 42)
    def test_newaxis(self):
        # Each newaxis adds one length-1 dimension.
        a = self.d
        self.assertEqual(a[newaxis].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ...].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
        self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
        self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a = self.d
        def subscript(x, i): x[i]
        self.assertRaises(IndexError, subscript, a, (newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
    def test_overlapping_assignment(self):
        # Assignments between overlapping views must behave as if the
        # right-hand side were fully copied first.
        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])
        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0, 0, 1, 2])
        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3, 2, 1, 0])
        a = np.arange(6).reshape(2, 3)
        a[::-1,:] = a[:, ::-1]
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        a = np.arange(6).reshape(2, 3)
        a[::-1, ::-1] = a[:, ::-1]
        assert_equal(a, [[3, 4, 5], [0, 1, 2]])
        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0, 1, 0, 1, 2])
        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4, 3, 2, 3, 4])
        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0, 1, 2, 1, 0])
        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
    """Tests for array creation: from __array__, strings, zeros/zeros_like,
    and dtype inference for heterogeneous sequences."""
    def test_from_attribute(self):
        # An __array__ that returns None must raise, not crash.
        class x(object):
            def __array__(self, dtype=None):
                pass
        self.assertRaises(ValueError, array, x())
    def test_from_string(self) :
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123', '123']
        result = array([123, 123], dtype=int)
        for type in types :
            msg = 'String conversion for %s' % type
            assert_equal(array(nstr, dtype=type), result, err_msg=msg)
    def test_void(self):
        arr = np.array([], dtype='V')
        assert_equal(arr.dtype.kind, 'V')
    def test_zeros(self):
        # zeros() must be all-zero for every integer/float typecode and for
        # sub-array and structured dtypes.
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((13,), dtype=dt)
            assert_equal(np.count_nonzero(d), 0)
            # true for ieee floats
            assert_equal(d.sum(), 0)
            assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='4i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
    @dec.slow
    def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((30 * 1024**2,), dtype=dt)
            assert_(not d.any())
    def test_zeros_obj(self):
        # test initialization from PyLong(0)
        d = np.zeros((13,), dtype=object)
        assert_array_equal(d, [0] * 13)
        assert_equal(np.count_nonzero(d), 0)
    def test_zeros_obj_obj(self):
        # Object fields inside a structured dtype are zero-initialized too.
        d = zeros(10, dtype=[('k', object, 2)])
        assert_array_equal(d['k'], 0)
    def test_zeros_like_like_zeros(self):
        # test zeros_like returns the same as zeros
        for c in np.typecodes['All']:
            if c == 'V':
                continue
            d = zeros((3,3), dtype=c)
            assert_array_equal(zeros_like(d), d)
            assert_equal(zeros_like(d).dtype, d.dtype)
        # explicitly check some special cases
        d = zeros((3,3), dtype='S5')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='U5')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='<i4')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='>i4')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='<M8[s]')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='>M8[s]')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='f4,f4')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
    def test_empty_unicode(self):
        # don't throw decode errors on garbage memory
        for i in range(5, 100, 5):
            d = np.empty(i, dtype='U')
            str(d)
    def test_sequence_non_homogenous(self):
        # NOTE(review): np.object / np.complex are removed aliases in modern
        # NumPy (>=1.24); these pin dtype inference for mixed sequences.
        assert_equal(np.array([4, 2**80]).dtype, np.object)
        assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80] * 3).dtype, np.object)
        assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
        assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
        assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
    @dec.skipif(sys.version_info[0] >= 3)
    def test_sequence_long(self):
        # Python 2 only: `long` integers widen to object when out of range.
        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
        assert_equal(np.array([long(4), 2**80]).dtype, np.object)
        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
        assert_equal(np.array([2**80, long(4)]).dtype, np.object)
    def test_non_sequence_sequence(self):
        """Should not segfault.
        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.
        """
        class Fail(object):
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise ValueError()
        class Map(object):
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise KeyError()
        a = np.array([Map()])
        assert_(a.shape == (1,))
        assert_(a.dtype == np.dtype(object))
        assert_raises(ValueError, np.array, [Fail()])
    def test_no_len_object_type(self):
        # gh-5100, want object array from iterable object without len()
        class Point2:
            def __init__(self):
                pass
            def __getitem__(self, ind):
                if ind in [0, 1]:
                    return ind
                else:
                    raise IndexError()
        d = np.array([Point2(), Point2(), Point2()])
        assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
    """Tests for structured (record) arrays: sub-array field access,
    comparisons, casting between structured dtypes, and field assignment."""
    def test_subarray_field_access(self):
        a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
        a['a'] = np.arange(60).reshape(3, 5, 2, 2)
        # Since the subarray is always in C-order, a transpose
        # does not swap the subarray:
        assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
        # In Fortran order, the subarray gets appended
        # like in all other cases, not prepended as a special case
        b = a.copy(order='F')
        assert_equal(a['a'].shape, b['a'].shape)
        assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
    def test_subarray_comparison(self):
        # Check that comparisons between record arrays with
        # multi-dimensional field types work properly
        a = np.rec.fromrecords(
            [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
            dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
        b = a.copy()
        assert_equal(a==b, [True, True])
        assert_equal(a!=b, [False, False])
        b[1].b = 'c'
        assert_equal(a==b, [True, False])
        assert_equal(a!=b, [False, True])
        # Mutating any element of a sub-array field flips the comparison.
        for i in range(3):
            b[0].a = a[0].a
            b[0].a[i] = 5
            assert_equal(a==b, [False, False])
            assert_equal(a!=b, [True, True])
        for i in range(2):
            for j in range(2):
                b = a.copy()
                b[0].c[i, j] = 10
                assert_equal(a==b, [False, True])
                assert_equal(a!=b, [True, False])
        # Check that broadcasting with a subarray works
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
        assert_equal(a==b, [[True, True, False], [False, False, True]])
        assert_equal(b==a, [[True, True, False], [False, False, True]])
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
        assert_equal(a==b, [[True, True, False], [False, False, True]])
        assert_equal(b==a, [[True, True, False], [False, False, True]])
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a==b, [[True, False, False], [False, False, True]])
        assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that broadcasting Fortran-style arrays with a subarray work
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a==b, [[True, False, False], [False, False, True]])
        assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result to broadcasting
        x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)
        x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)
        # Check that structured arrays that are different only in
        # byte-order work
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
        assert_equal(a == b, [False, True])
    def test_casting(self):
        # Check that casting a structured array to change its byte order
        # works
        a = np.array([(1,)], dtype=[('a', '<i4')])
        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
        b = a.astype([('a', '>i4')])
        assert_equal(b, a.byteswap().newbyteorder())
        assert_equal(a['a'][0], b['a'][0])
        # Check that equality comparison works on structured arrays if
        # they are 'equiv'-castable
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        assert_equal(a == b, [True, True])
        # Check that 'equiv' casting can reorder fields and change byte
        # order
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        c = a.astype(b.dtype, casting='equiv')
        assert_equal(a == c, [True, True])
        # Check that 'safe' casting can change byte order and up-cast
        # fields
        t = [('a', '<i8'), ('b', '>f8')]
        assert_(np.can_cast(a.dtype, t, casting='safe'))
        c = a.astype(t, casting='safe')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])
        # Check that 'same_kind' casting can change byte order and
        # change field widths within a "kind"
        t = [('a', '<i4'), ('b', '>f4')]
        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
        c = a.astype(t, casting='same_kind')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])
        # Check that casting fails if the casting rule should fail on
        # any of the fields
        t = [('a', '>i8'), ('b', '<f4')]
        assert_(not np.can_cast(a.dtype, t, casting='safe'))
        assert_raises(TypeError, a.astype, t, casting='safe')
        t = [('a', '>i2'), ('b', '<f8')]
        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
        assert_raises(TypeError, a.astype, t, casting='equiv')
        t = [('a', '>i8'), ('b', '<i2')]
        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
        assert_raises(TypeError, a.astype, t, casting='same_kind')
        assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
        assert_raises(TypeError, a.astype, b.dtype, casting='no')
        # Check that non-'unsafe' casting can't change the set of field names
        for casting in ['no', 'safe', 'equiv', 'same_kind']:
            t = [('a', '>i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
    def test_objview(self):
        # https://github.com/numpy/numpy/issues/3286
        a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
        a[['a', 'b']]  # TypeError?
        # https://github.com/numpy/numpy/issues/3253
        dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
        new2 = dat2[['B', 'A']]  # TypeError?
    def test_setfield(self):
        # https://github.com/numpy/numpy/issues/3126
        struct_dt = np.dtype([('elem', 'i4', 5),])
        dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
        x = np.zeros(1, dt)
        x[0]['field'] = np.ones(10, dtype='i4')
        x[0]['struct'] = np.ones(1, dtype=struct_dt)
        assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
    def test_setfield_object(self):
        # make sure object field assignment with ndarray value
        # on void scalar mimics setitem behavior
        b = np.zeros(1, dtype=[('x', 'O')])
        # next line should work identically to b['x'][0] = np.arange(3)
        b[0]['x'] = np.arange(3)
        assert_equal(b[0]['x'], np.arange(3))
        # check that broadcasting check still works
        c = np.zeros(1, dtype=[('x', 'O', 5)])
        def testassign():
            c[0]['x'] = np.arange(3)
        assert_raises(ValueError, testassign)
class TestBool(TestCase):
    """Tests for boolean scalar interning, boolean sums and count_nonzero."""
    def test_test_interning(self):
        # bool_ scalars are interned singletons (like Python's True/False).
        a0 = bool_(0)
        b0 = bool_(False)
        self.assertTrue(a0 is b0)
        a1 = bool_(1)
        b1 = bool_(True)
        self.assertTrue(a1 is b1)
        self.assertTrue(array([True])[0] is a1)
        self.assertTrue(array(True)[()] is a1)
    def test_sum(self):
        # NOTE(review): np.bool is the removed alias of builtin bool in
        # modern NumPy (>=1.24).
        d = np.ones(101, dtype=np.bool);
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)
        # 0xff bytes viewed as bool must still each count as one.
        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)
    def check_count_nonzero(self, power, length):
        # Exhaustively check every bit pattern of `power` bits spread over a
        # length-`length` boolean array, including non-0/1 byte values.
        powers = [2 ** i for i in range(length)]
        for i in range(2**power):
            l = [(i & x) != 0 for x in powers]
            a = np.array(l, dtype=np.bool)
            c = builtins.sum(l)
            self.assertEqual(np.count_nonzero(a), c)
            av = a.view(np.uint8)
            av *= 3
            self.assertEqual(np.count_nonzero(a), c)
            av *= 4
            self.assertEqual(np.count_nonzero(a), c)
            av[av != 0] = 0xFF
            self.assertEqual(np.count_nonzero(a), c)
    def test_count_nonzero(self):
        # check all 12 bit combinations in a length 17 array
        # covers most cases of the 16 byte unrolled code
        self.check_count_nonzero(12, 17)
    @dec.slow
    def test_count_nonzero_all(self):
        # check all combinations in a length 17 array
        # covers all cases of the 16 byte unrolled code
        self.check_count_nonzero(17, 17)
    def test_count_nonzero_unaligned(self):
        # prevent mistakes as e.g. gh-4060
        for o in range(7):
            a = np.zeros((18,), dtype=np.bool)[o+1:]
            a[:o] = True
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
            a = np.ones((18,), dtype=np.bool)[o+1:]
            a[:o] = False
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(array([1.2, 1.5]), [1, 2])
check_round(array(1.5), 2)
check_round(array([12.2, 15.5]), [10, 20], -1)
check_round(array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(array([4.5 + 1.5j]), [4 + 2j])
check_round(array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
    m = array([[1, 2], [3, 4]])
    # plain transpose swaps the two axes
    assert_equal(m.transpose(), [[1, 3], [2, 4]])
    # an axes argument whose length differs from ndim is rejected
    for bad_axes in ((0,), (0, 0), (0, 1, 2)):
        self.assertRaises(ValueError, lambda: m.transpose(*bad_axes))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
dtype = '{0}{1}'.format(endianess, dt)
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "unicode sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
    # Sorting a record array on each field in turn must keep all
    # fields of a record together.
    ids = np.array([21, 32, 14])
    words = np.array(['my', 'first', 'name'])
    nums = np.array([3.1, 4.5, 6.2])
    r = np.rec.fromarrays([ids, words, nums], names='id,word,number')

    r.sort(order=['id'])
    assert_equal(r.id, array([14, 21, 32]))
    assert_equal(r.word, array(['name', 'my', 'first']))
    assert_equal(r.number, array([6.2, 3.1, 4.5]))

    r.sort(order=['word'])
    assert_equal(r.id, array([32, 21, 14]))
    assert_equal(r.word, array(['first', 'my', 'name']))
    assert_equal(r.number, array([4.5, 3.1, 6.2]))

    r.sort(order=['number'])
    assert_equal(r.id, array([21, 32, 14]))
    assert_equal(r.word, array(['my', 'first', 'name']))
    assert_equal(r.number, array([3.1, 4.5, 6.2]))

    # Sorting on a non-native byte order field must compare by value:
    # 255 < 258 even though their swapped byte patterns say otherwise.
    if sys.byteorder == 'little':
        strtype = '>i2'
    else:
        strtype = '<i2'
    # NOTE(review): strchar is a module-level constant ('S' or 'U')
    # defined elsewhere in this file.
    mydtype = [('name', strchar + '5'), ('col2', strtype)]
    r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                 dtype=mydtype)
    r.sort(order='col2')
    assert_equal(r['col2'], [1, 3, 255, 258])
    assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                             dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare fuction differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
dtype = '{0}{1}'.format(endianess, dt)
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# using None is known fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
    """Check searchsorted against nan ordering, endianness, degenerate
    sizes, unaligned data and every type-specific kernel."""
    # test for floats and complex containing nans. The logic is the
    # same for all float types so only test double types for now.
    # The search sorted routines use the compare functions for the
    # array type, so this checks if that is consistent with the sort
    # order.

    # check double: nan compares greater than everything, so the input
    # is already in sort order and each key finds its own position
    a = np.array([0, 1, np.nan])
    msg = "Test real searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='l')
    assert_equal(b, np.arange(3), msg)
    msg = "Test real searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='r')
    assert_equal(b, np.arange(1, 4), msg)
    # check double complex: order is lexicographic on (real, imag)
    # with nans sorting last in each component
    a = np.zeros(9, dtype=np.complex128)
    a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
    a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
    msg = "Test complex searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='l')
    assert_equal(b, np.arange(9), msg)
    msg = "Test complex searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='r')
    assert_equal(b, np.arange(1, 10), msg)
    # byte order: comparison must be by value, not raw byte pattern
    msg = "Test searchsorted with little endian, side='l'"
    a = np.array([0, 128], dtype='<i4')
    b = a.searchsorted(np.array(128, dtype='<i4'))
    assert_equal(b, 1, msg)
    msg = "Test searchsorted with big endian, side='l'"
    a = np.array([0, 128], dtype='>i4')
    b = a.searchsorted(np.array(128, dtype='>i4'))
    assert_equal(b, 1, msg)
    # Check 0 elements
    a = np.ones(0)
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 0])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 0, 0])
    a = np.ones(1)
    # Check 1 element
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 1])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 1, 1])
    # Check all elements equal
    a = np.ones(2)
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 2])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 2, 2])
    # Test searching unaligned array: view a uint8 buffer at a one-byte
    # offset so the data is guaranteed misaligned
    a = np.arange(10)
    aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
    unaligned = aligned[1:].view(a.dtype)
    unaligned[:] = a
    # Test searching unaligned array
    b = unaligned.searchsorted(a, 'l')
    assert_equal(b, a)
    b = unaligned.searchsorted(a, 'r')
    assert_equal(b, a + 1)
    # Test searching for unaligned keys
    b = a.searchsorted(unaligned, 'l')
    assert_equal(b, a)
    b = a.searchsorted(unaligned, 'r')
    assert_equal(b, a + 1)
    # Test smart resetting of binsearch indices: keys presented in
    # decreasing order must still be located correctly
    a = np.arange(5)
    b = a.searchsorted([6, 5, 4], 'l')
    assert_equal(b, [5, 5, 4])
    b = a.searchsorted([6, 5, 4], 'r')
    assert_equal(b, [5, 5, 5])
    # Test all type specific binary search functions
    types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                     np.typecodes['Datetime'], '?O'))
    for dt in types:
        if dt == 'M':
            # plain 'M' has no unit; use day resolution
            dt = 'M8[D]'
        if dt == '?':
            # booleans only have two distinct values
            a = np.arange(2, dtype=dt)
            out = np.arange(2)
        else:
            a = np.arange(0, 5, dtype=dt)
            out = np.arange(5)
        b = a.searchsorted(a, 'l')
        assert_equal(b, out)
        b = a.searchsorted(a, 'r')
        assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
    # Test searchsorted on unicode strings.

    # 1.6.1 contained a string length miscalculation in
    # arraytypes.c.src:UNICODE_compare() which manifested as
    # incorrect/inconsistent results from searchsorted.
    # The data below are long, nearly identical strings that differ
    # only near the end — exactly the shape of input that triggered
    # the miscalculation.
    # NOTE(review): np.unicode is an alias removed in modern numpy
    # (plain 'str' is equivalent) — confirm the numpy version targeted.
    a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
                 dtype=np.unicode)
    ind = np.arange(len(a))
    # the array is sorted, so each element must be found at its own
    # index ('left') or one past it ('right')
    assert_equal([a.searchsorted(v, 'left') for v in a], ind)
    assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
    assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
    assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.partition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
    """Exhaustive checks of np.partition/np.argpartition.

    BUG FIX relative to the original: ``map(np.random.shuffle, d1)``
    is lazy on Python 3 and never executed, leaving the tiled rows
    unshuffled; an explicit loop now performs the shuffles.
    """
    d = np.arange(10)
    # invalid 'kind' arguments are rejected up front
    assert_raises(TypeError, np.partition, d, 2, kind=1)
    assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
    assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
    assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
    assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
    for k in ("introselect",):
        # empty and single-element arrays are returned unchanged
        d = np.array([])
        assert_array_equal(np.partition(d, 0, kind=k), d)
        assert_array_equal(np.argpartition(d, 0, kind=k), d)
        d = np.ones(1)
        assert_array_equal(np.partition(d, 0, kind=k)[0], d)
        assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                           np.partition(d, 0, kind=k))
        # kth not modified
        kth = np.array([30, 15, 5])
        okth = kth.copy()
        np.partition(np.arange(40), kth)
        assert_array_equal(kth, okth)
        # two-element arrays, including duplicates
        for r in ([2, 1], [1, 2], [1, 1]):
            d = np.array(r)
            tgt = np.sort(d)
            assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
            assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
            assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                               np.partition(d, 0, kind=k))
            assert_array_equal(d[np.argpartition(d, 1, kind=k)],
                               np.partition(d, 1, kind=k))
            # repeatedly partitioning shrinking tails in place sorts d
            for i in range(d.size):
                d[i:].partition(0, kind=k)
            assert_array_equal(d, tgt)
        # three-element arrays, all distinct/duplicate combinations
        for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
                  [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
            d = np.array(r)
            tgt = np.sort(d)
            assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
            assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
            assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
            assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                               np.partition(d, 0, kind=k))
            assert_array_equal(d[np.argpartition(d, 1, kind=k)],
                               np.partition(d, 1, kind=k))
            assert_array_equal(d[np.argpartition(d, 2, kind=k)],
                               np.partition(d, 2, kind=k))
            for i in range(d.size):
                d[i:].partition(0, kind=k)
            assert_array_equal(d, tgt)
        # all-equal elements
        d = np.ones(50)
        assert_array_equal(np.partition(d, 0, kind=k), d)
        assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                           np.partition(d, 0, kind=k))
        # sorted input
        d = np.arange(49)
        self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
        self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
        assert_array_equal(d[np.argpartition(d, 5, kind=k)],
                           np.partition(d, 5, kind=k))
        assert_array_equal(d[np.argpartition(d, 15, kind=k)],
                           np.partition(d, 15, kind=k))
        # reverse-sorted input
        d = np.arange(47)[::-1]
        self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
        self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
        assert_array_equal(d[np.argpartition(d, 6, kind=k)],
                           np.partition(d, 6, kind=k))
        assert_array_equal(d[np.argpartition(d, 16, kind=k)],
                           np.partition(d, 16, kind=k))
        # negative kth counts from the end
        assert_array_equal(np.partition(d, -6, kind=k),
                           np.partition(d, 41, kind=k))
        assert_array_equal(np.partition(d, -16, kind=k),
                           np.partition(d, 31, kind=k))
        assert_array_equal(d[np.argpartition(d, -6, kind=k)],
                           np.partition(d, 41, kind=k))
        # median of 3 killer, O(n^2) on pure median 3 pivot quickselect
        # exercises the median of median of 5 code used to keep O(n)
        d = np.arange(1000000)
        x = np.roll(d, d.size // 2)
        mid = x.size // 2 + 1
        assert_equal(np.partition(x, mid)[mid], mid)
        d = np.arange(1000001)
        x = np.roll(d, d.size // 2 + 1)
        mid = x.size // 2 + 1
        assert_equal(np.partition(x, mid)[mid], mid)
        # max: the largest value ends up at the last requested spot
        d = np.ones(10)
        d[1] = 4
        assert_equal(np.partition(d, (2, -1))[-1], 4)
        assert_equal(np.partition(d, (2, -1))[2], 1)
        assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
        assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
        # nan sorts to the end
        d[1] = np.nan
        assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
        assert_(np.isnan(np.partition(d, (2, -1))[-1]))
        # equal elements
        d = np.arange(47) % 7
        tgt = np.sort(np.arange(47) % 7)
        np.random.shuffle(d)
        for i in range(d.size):
            self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
        assert_array_equal(d[np.argpartition(d, 6, kind=k)],
                           np.partition(d, 6, kind=k))
        assert_array_equal(d[np.argpartition(d, 16, kind=k)],
                           np.partition(d, 16, kind=k))
        for i in range(d.size):
            d[i:].partition(0, kind=k)
        assert_array_equal(d, tgt)

        # multiple kth values at once, with heavy duplication
        d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
                      7, 7, 7, 7, 7, 9])
        kth = [0, 3, 19, 20]
        assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
        assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))

        # out-of-range kth / invalid axis must raise
        d = np.array([2, 1])
        d.partition(0, kind=k)
        assert_raises(ValueError, d.partition, 2)
        assert_raises(ValueError, d.partition, 3, axis=1)
        assert_raises(ValueError, np.partition, d, 2)
        assert_raises(ValueError, np.partition, d, 2, axis=1)
        assert_raises(ValueError, d.argpartition, 2)
        assert_raises(ValueError, d.argpartition, 3, axis=1)
        assert_raises(ValueError, np.argpartition, d, 2)
        assert_raises(ValueError, np.argpartition, d, 2, axis=1)
        d = np.arange(10).reshape((2, 5))
        d.partition(1, axis=0, kind=k)
        d.partition(4, axis=1, kind=k)
        np.partition(d, 1, axis=0, kind=k)
        np.partition(d, 4, axis=1, kind=k)
        np.partition(d, 1, axis=None, kind=k)
        np.partition(d, 9, axis=None, kind=k)
        d.argpartition(1, axis=0, kind=k)
        d.argpartition(4, axis=1, kind=k)
        np.argpartition(d, 1, axis=0, kind=k)
        np.argpartition(d, 4, axis=1, kind=k)
        np.argpartition(d, 1, axis=None, kind=k)
        np.argpartition(d, 9, axis=None, kind=k)
        assert_raises(ValueError, d.partition, 2, axis=0)
        assert_raises(ValueError, d.partition, 11, axis=1)
        assert_raises(TypeError, d.partition, 2, axis=None)
        assert_raises(ValueError, np.partition, d, 9, axis=1)
        assert_raises(ValueError, np.partition, d, 11, axis=None)
        assert_raises(ValueError, d.argpartition, 2, axis=0)
        assert_raises(ValueError, d.argpartition, 11, axis=1)
        assert_raises(ValueError, np.argpartition, d, 9, axis=1)
        assert_raises(ValueError, np.argpartition, d, 11, axis=None)

        # systematic check over several dtypes and array sizes
        td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
              for s in (9, 16)]
        for dt, s in td:
            aae = assert_array_equal
            at = self.assertTrue
            d = np.arange(s, dtype=dt)
            np.random.shuffle(d)
            d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # BUG FIX: 'map' is lazy on Python 3; shuffle explicitly.
            for row in d1:
                np.random.shuffle(row)
            d0 = np.transpose(d1)
            for i in range(d.size):
                p = np.partition(d, i, kind=k)
                self.assertEqual(p[i], i)
                # all before are smaller
                assert_array_less(p[:i], p[i])
                # all after are larger
                assert_array_less(p[i], p[i + 1:])
                aae(p, d[np.argpartition(d, i, kind=k)])

                p = np.partition(d1, i, axis=1, kind=k)
                aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
                # array_less does not seem to work right
                at((p[:, :i].T <= p[:, i]).all(),
                   msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
                at((p[:, i + 1:].T > p[:, i]).all(),
                   msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
                aae(p, d1[np.arange(d1.shape[0])[:, None],
                          np.argpartition(d1, i, axis=1, kind=k)])

                p = np.partition(d0, i, axis=0, kind=k)
                aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
                # array_less does not seem to work right
                at((p[:i, :] <= p[i, :]).all(),
                   msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
                at((p[i + 1:, :] > p[i, :]).all(),
                   msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
                aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
                          np.arange(d0.shape[1])[None, :]])

                # check inplace
                dc = d.copy()
                dc.partition(i, kind=k)
                assert_equal(dc, np.partition(d, i, kind=k))
                dc = d0.copy()
                dc.partition(i, axis=0, kind=k)
                assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
                dc = d1.copy()
                dc.partition(i, axis=1, kind=k)
                assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
    """Verify that d is partitioned around every index in kth:
    strictly smaller values before each pivot, >= pivot after it."""
    lower = 0
    for idx in np.sort(kth):
        assert_array_less(d[lower:idx], d[idx], err_msg='kth %d' % idx)
        assert_((d[idx:] >= d[idx]).all(),
                msg="kth %d, %r not greater equal %d" % (idx, d[idx:], d[idx]))
        lower = idx + 1
def test_partition_iterative(self):
    """Partitioning with several kth values at once.

    BUG FIX relative to the original: ``map(np.random.shuffle, d1)``
    is lazy on Python 3 and never ran, so the rows were never
    shuffled; an explicit loop now performs the shuffles.
    """
    d = np.arange(17)
    kth = (0, 1, 2, 429, 231)
    # any out-of-range member of kth is rejected
    assert_raises(ValueError, d.partition, kth)
    assert_raises(ValueError, d.argpartition, kth)
    d = np.arange(10).reshape((2, 5))
    assert_raises(ValueError, d.partition, kth, axis=0)
    assert_raises(ValueError, d.partition, kth, axis=1)
    assert_raises(ValueError, np.partition, d, kth, axis=1)
    assert_raises(ValueError, np.partition, d, kth, axis=None)

    d = np.array([3, 4, 2, 1])
    p = np.partition(d, (0, 3))
    self.assert_partitioned(p, (0, 3))
    self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
    # negative kth values index from the end
    assert_array_equal(p, np.partition(d, (-3, -1)))
    assert_array_equal(p, d[np.argpartition(d, (-3, -1))])

    # partitioning at every index amounts to a full sort
    d = np.arange(17)
    np.random.shuffle(d)
    d.partition(range(d.size))
    assert_array_equal(np.arange(17), d)
    np.random.shuffle(d)
    assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])

    # test unsorted kth
    d = np.arange(17)
    np.random.shuffle(d)
    keys = np.array([1, 3, 8, -2])
    np.random.shuffle(d)
    p = np.partition(d, keys)
    self.assert_partitioned(p, keys)
    p = d[np.argpartition(d, keys)]
    self.assert_partitioned(p, keys)
    # the order of the kth values must not matter
    np.random.shuffle(keys)
    assert_array_equal(np.partition(d, keys), p)
    assert_array_equal(d[np.argpartition(d, keys)], p)

    # repeated kth values collapse to a single pivot
    d = np.arange(20)[::-1]
    self.assert_partitioned(np.partition(d, [5]*4), [5])
    self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
                            [5]*4 + [6, 13])
    self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
    self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
                            [5]*4 + [6, 13])

    d = np.arange(12)
    np.random.shuffle(d)
    d1 = np.tile(np.arange(12), (4, 1))
    # BUG FIX: 'map' is lazy on Python 3; shuffle each row explicitly.
    for row in d1:
        np.random.shuffle(row)
    d0 = np.transpose(d1)
    kth = (1, 6, 7, -1)
    p = np.partition(d1, kth, axis=1)
    pa = d1[np.arange(d1.shape[0])[:, None],
            d1.argpartition(kth, axis=1)]
    assert_array_equal(p, pa)
    for i in range(d1.shape[0]):
        self.assert_partitioned(p[i, :], kth)
    p = np.partition(d0, kth, axis=0)
    pa = d0[np.argpartition(d0, kth, axis=0),
            np.arange(d0.shape[1])[None, :]]
    assert_array_equal(p, pa)
    for i in range(d0.shape[1]):
        self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
    # argpartition must accept a plain Python list, gh-5524
    seq = [6, 7, 3, 2, 9, 0]
    order = np.argpartition(seq, 1)
    self.assert_partitioned(np.array(seq)[order], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
    # Exercises the __numpy_ufunc__ override protocol for np.dot:
    # an object whose hook returns a value wins the dispatch, while
    # one returning NotImplemented forces a TypeError when no other
    # handler accepts the operands.
    # NOTE(review): __numpy_ufunc__ is specific to the numpy
    # development line this test was written for — confirm the
    # targeted numpy version still dispatches through it.
    class A(object):
        def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
            return "A"

    class B(object):
        def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
            return NotImplemented

    a = A()
    b = B()
    c = np.array([[1]])
    # A's hook handles both orderings, whether called via np.dot or
    # the ndarray method
    assert_equal(np.dot(a, b), "A")
    assert_equal(c.dot(a), "A")
    # B declines, and no other handler exists -> TypeError
    assert_raises(TypeError, np.dot, b, c)
    assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
    # this test is only for 1.9, the diagonal view will be
    # writeable in 1.10.
    # All three spellings return a non-owning, read-only view in 1.9.
    a = np.eye(3).diagonal()
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)

    a = np.diagonal(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)

    a = np.diag(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
    # ravel() order semantics ('C', 'F', 'A', 'K'), view-vs-copy
    # behavior, and corner cases with manually adjusted strides.
    a = np.array([[0, 1], [2, 3]])
    assert_equal(a.ravel(), [0, 1, 2, 3])
    assert_(not a.ravel().flags.owndata)
    assert_equal(a.ravel('F'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
    assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
    assert_(not a.ravel(order='A').flags.owndata)
    assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
    assert_(not a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel(), a.reshape(-1))

    # Fortran-contiguous input: 'A' and 'K' follow memory order
    a = np.array([[0, 1], [2, 3]], order='F')
    assert_equal(a.ravel(), [0, 1, 2, 3])
    assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
    assert_(not a.ravel(order='A').flags.owndata)
    assert_(not a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel(), a.reshape(-1))
    assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))

    # reversed view (negative stride on axis 0)
    a = np.array([[0, 1], [2, 3]])[::-1, :]
    assert_equal(a.ravel(), [2, 3, 0, 1])
    assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
    assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
    assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
    # 'K' doesn't reverse the axes of negative strides
    assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
    assert_(a.ravel(order='K').flags.owndata)

    # Not contiguous and 1-sized axis with non matching stride
    a = np.arange(2**3 * 2)[::2]
    a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
    strides = list(a.strides)
    strides[1] = 123  # garbage stride on the length-1 axis must be ignored
    a.strides = strides
    assert_(np.may_share_memory(a.ravel(order='K'), a))
    assert_equal(a.ravel('K'), np.arange(0, 15, 2))

    # General case of possible ravel that is not contiguous but
    # works and includes a 1-sized axis with non matching stride
    a = a.swapaxes(-1, -2)  # swap back to C-order
    assert_(np.may_share_memory(a.ravel(order='C'), a))
    assert_(np.may_share_memory(a.ravel(order='K'), a))

    a = a.T  # swap all to Fortran order
    assert_(np.may_share_memory(a.ravel(order='F'), a))
    assert_(np.may_share_memory(a.ravel(order='K'), a))

    # Test negative strides:
    a = np.arange(4)[::-1].reshape(2, 2)
    assert_(np.may_share_memory(a.ravel(order='C'), a))
    assert_(np.may_share_memory(a.ravel(order='K'), a))
    assert_equal(a.ravel('C'), [3, 2, 1, 0])
    assert_equal(a.ravel('K'), [3, 2, 1, 0])

    # Test keeporder with weirdly strided 1-sized dims (1-d first stride)
    a = np.arange(8)[::2].reshape(1, 2, 2, 1)  # neither C, nor F order
    strides = list(a.strides)
    strides[0] = -12
    strides[-1] = 0
    a.strides = strides
    assert_(np.may_share_memory(a.ravel(order='K'), a))
    assert_equal(a.ravel('K'), a.ravel('C'))

    # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
    a = np.array([[1]])
    a.strides = (123, 432)
    # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
    # them up on purpose:
    if np.ones(1).strides == (8,):
        assert_(np.may_share_memory(a.ravel('K'), a))
        assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))

    for order in ('C', 'F', 'A', 'K'):
        # 0-d corner case:
        a = np.array(0)
        assert_equal(a.ravel(order), [0])
        assert_(np.may_share_memory(a.ravel(order), a))

    # Test that certain non-inplace ravels work right (mostly) for 'K':
    b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
    a = b[..., ::2]
    assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
    assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
    assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
    assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
    a = b[::2, ...]
    assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
    assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
    assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
    assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_swapaxes(self):
    # Exhaustively swap every (i, j) axis pair, including negative
    # indices, on a 4-d array; verify resulting shape, contents, and
    # that a view (not a copy) is returned.
    a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
    idx = np.indices(a.shape)
    assert_(a.flags['OWNDATA'])
    b = a.copy()
    # check exceptions
    assert_raises(ValueError, a.swapaxes, -5, 0)
    assert_raises(ValueError, a.swapaxes, 4, 0)
    assert_raises(ValueError, a.swapaxes, 0, -5)
    assert_raises(ValueError, a.swapaxes, 0, 4)

    for i in range(-4, 4):
        for j in range(-4, 4):
            # k == 0: contiguous input; k == 1: non-contiguous (see below)
            for k, src in enumerate((a, b)):
                c = src.swapaxes(i, j)
                # check shape
                shape = list(src.shape)
                shape[i] = src.shape[j]
                shape[j] = src.shape[i]
                assert_equal(c.shape, shape, str((i, j, k)))
                # check array contents
                i0, i1, i2, i3 = [dim-1 for dim in c.shape]
                j0, j1, j2, j3 = [dim-1 for dim in src.shape]
                assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
                             c[idx[i0], idx[i1], idx[i2], idx[i3]],
                             str((i, j, k)))
                # check a view is always returned, gh-5260
                assert_(not c.flags['OWNDATA'], str((i, j, k)))
                # check on non-contiguous input array
                if k == 1:
                    b = c  # deliberately feeds the swapped view back in
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
    # Binary-operator behavior: temporary elision with refcount-1 operands,
    # and the legacy __numpy_ufunc__ override protocol (pre-NEP-13).

    def test_inplace(self):
        # test refcount 1 inplace conversion
        assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
                                  [0.5, 1.0])

        d = np.array([0.5, 0.5])[::2]
        assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
                                  [0.25, 0.5])

        # the operands must never be mutated by the arithmetic below
        a = np.array([0.5])
        b = np.array([0.5])
        c = a + b
        c = a - b
        c = a * b
        c = a / b
        assert_equal(a, b)
        assert_almost_equal(c, 1.)

        c = a + b * 2. / b * a - a / b
        assert_equal(a, b)
        assert_equal(c, 0.5)

        # true divide
        a = np.array([5])
        b = np.array([3])
        c = (a * a) / b

        assert_almost_equal(c, 25 / 3)
        assert_equal(a, 5)
        assert_equal(b, 3)

    def test_extension_incref_elide(self):
        # test extension (e.g. cython) calling PyNumber_* slots without
        # increasing the reference counts
        #
        # def incref_elide(a):
        #    d = input.copy() # refcount 1
        #    return d, d + d # PyNumber_Add without increasing refcount
        from numpy.core.multiarray_tests import incref_elide
        d = np.ones(5)
        orig, res = incref_elide(d)
        # the return original should not be changed to an inplace operation
        assert_array_equal(orig, d)
        assert_array_equal(res, d + d)

    def test_extension_incref_elide_stack(self):
        # scanning if the refcount == 1 object is on the python stack to check
        # that we are called directly from python is flawed as object may still
        # be above the stack pointer and we have no access to the top of it
        #
        # def incref_elide_l(d):
        #    return l[4] + l[4] # PyNumber_Add without increasing refcount
        from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwriten
        l = [1, 1, 1, 1, np.ones(5)]
        res = incref_elide_l(l)
        # the return original should not be changed to an inplace operation
        assert_array_equal(l[4], np.ones(5))
        assert_array_equal(res, l[4] + l[4])

    def test_ufunc_override_rop_precedence(self):
        # Check that __rmul__ and other right-hand operations have
        # precedence over __numpy_ufunc__
        # ops maps a forward dunder to (reflected dunder, matching ufunc,
        # whether an in-place variant exists)
        ops = {
            '__add__':      ('__radd__', np.add, True),
            '__sub__':      ('__rsub__', np.subtract, True),
            '__mul__':      ('__rmul__', np.multiply, True),
            '__truediv__':  ('__rtruediv__', np.true_divide, True),
            '__floordiv__': ('__rfloordiv__', np.floor_divide, True),
            '__mod__':      ('__rmod__', np.remainder, True),
            '__divmod__':   ('__rdivmod__', None, False),
            '__pow__':      ('__rpow__', np.power, True),
            '__lshift__':   ('__rlshift__', np.left_shift, True),
            '__rshift__':   ('__rrshift__', np.right_shift, True),
            '__and__':      ('__rand__', np.bitwise_and, True),
            '__xor__':      ('__rxor__', np.bitwise_xor, True),
            '__or__':       ('__ror__', np.bitwise_or, True),
            '__ge__':       ('__le__', np.less_equal, False),
            '__gt__':       ('__lt__', np.less, False),
            '__le__':       ('__ge__', np.greater_equal, False),
            '__lt__':       ('__gt__', np.greater, False),
            '__eq__':       ('__eq__', np.equal, False),
            '__ne__':       ('__ne__', np.not_equal, False),
        }

        class OtherNdarraySubclass(ndarray):
            pass

        class OtherNdarraySubclassWithOverride(ndarray):
            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))

        def check(op_name, ndsubclass):
            rop_name, np_op, has_iop = ops[op_name]

            if has_iop:
                iop_name = '__i' + op_name[2:]
                iop = getattr(operator, iop_name)

            if op_name == "__divmod__":
                op = divmod
            else:
                op = getattr(operator, op_name)

            # Dummy class: built dynamically below via type() so that the
            # forward/reflected dunders can be installed under any name
            def __init__(self, *a, **kw):
                pass

            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))

            def __op__(self, *other):
                return "op"

            def __rop__(self, *other):
                return "rop"

            if ndsubclass:
                bases = (ndarray,)
            else:
                bases = (object,)

            dct = {'__init__': __init__,
                   '__numpy_ufunc__': __numpy_ufunc__,
                   op_name: __op__}
            if op_name != rop_name:
                dct[rop_name] = __rop__

            cls = type("Rop" + rop_name, bases, dct)

            # Check behavior against both bare ndarray objects and a
            # ndarray subclasses with and without their own override
            obj = cls((1,), buffer=np.ones(1,))

            arr_objs = [np.array([1]),
                        np.array([2]).view(OtherNdarraySubclass),
                        np.array([3]).view(OtherNdarraySubclassWithOverride),
                        ]

            for arr in arr_objs:
                err_msg = "%r %r" % (op_name, arr,)

                # Check that ndarray op gives up if it sees a non-subclass
                if not isinstance(obj, arr.__class__):
                    assert_equal(getattr(arr, op_name)(obj),
                                 NotImplemented, err_msg=err_msg)

                # Check that the Python binops have priority
                assert_equal(op(obj, arr), "op", err_msg=err_msg)
                if op_name == rop_name:
                    assert_equal(op(arr, obj), "op", err_msg=err_msg)
                else:
                    assert_equal(op(arr, obj), "rop", err_msg=err_msg)

                # Check that Python binops have priority also for in-place ops
                if has_iop:
                    assert_equal(getattr(arr, iop_name)(obj),
                                 NotImplemented, err_msg=err_msg)
                    if op_name != "__pow__":
                        # inplace pow requires the other object to be
                        # integer-like?
                        assert_equal(iop(arr, obj), "rop", err_msg=err_msg)

                # Check that ufunc call __numpy_ufunc__ normally
                if np_op is not None:
                    assert_raises(AssertionError, np_op, arr, obj,
                                  err_msg=err_msg)
                    assert_raises(AssertionError, np_op, obj, arr,
                                  err_msg=err_msg)

        # Check all binary operations (nose-style generator test)
        for op_name in sorted(ops.keys()):
            yield check, op_name, True
            yield check, op_name, False

    def test_ufunc_override_rop_simple(self):
        # Check parts of the binary op overriding behavior in an
        # explicit test case that is easier to understand.
        class SomeClass(object):
            def __numpy_ufunc__(self, *a, **kw):
                return "ufunc"

            def __mul__(self, other):
                return 123

            def __rmul__(self, other):
                return 321

            def __rsub__(self, other):
                return "no subs for me"

            def __gt__(self, other):
                return "yep"

            def __lt__(self, other):
                return "nope"

        class SomeClass2(SomeClass, ndarray):
            # partial override: multiply/bitwise_and short-circuit, every
            # other ufunc is re-dispatched on a plain ndarray view of self
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                if ufunc is np.multiply or ufunc is np.bitwise_and:
                    return "ufunc"
                else:
                    inputs = list(inputs)
                    inputs[i] = np.asarray(self)
                    func = getattr(ufunc, method)
                    r = func(*inputs, **kw)
                    if 'out' in kw:
                        return r
                    else:
                        x = self.__class__(r.shape, dtype=r.dtype)
                        x[...] = r
                        return x

        class SomeClass3(SomeClass2):
            def __rsub__(self, other):
                return "sub for me"

        arr = np.array([0])
        obj = SomeClass()
        obj2 = SomeClass2((1,), dtype=np.int_)
        obj2[0] = 9
        obj3 = SomeClass3((1,), dtype=np.int_)
        obj3[0] = 4

        # obj is first, so should get to define outcome.
        assert_equal(obj * arr, 123)
        # obj is second, but has __numpy_ufunc__ and defines __rmul__.
        assert_equal(arr * obj, 321)
        # obj is second, but has __numpy_ufunc__ and defines __rsub__.
        assert_equal(arr - obj, "no subs for me")
        # obj is second, but has __numpy_ufunc__ and defines __lt__.
        assert_equal(arr > obj, "nope")
        # obj is second, but has __numpy_ufunc__ and defines __gt__.
        assert_equal(arr < obj, "yep")
        # Called as a ufunc, obj.__numpy_ufunc__ is used.
        assert_equal(np.multiply(arr, obj), "ufunc")
        # obj is second, but has __numpy_ufunc__ and defines __rmul__.
        arr *= obj
        assert_equal(arr, 321)

        # obj2 is an ndarray subclass, so CPython takes care of the same rules.
        assert_equal(obj2 * arr, 123)
        assert_equal(arr * obj2, 321)
        assert_equal(arr - obj2, "no subs for me")
        assert_equal(arr > obj2, "nope")
        assert_equal(arr < obj2, "yep")
        # Called as a ufunc, obj2.__numpy_ufunc__ is called.
        assert_equal(np.multiply(arr, obj2), "ufunc")
        # Also when the method is not overridden.
        assert_equal(arr & obj2, "ufunc")
        arr *= obj2
        assert_equal(arr, 321)

        obj2 += 33
        assert_equal(obj2[0], 42)
        assert_equal(obj2.sum(), 42)
        assert_(isinstance(obj2, SomeClass2))

        # Obj3 is subclass that defines __rsub__.  CPython calls it.
        assert_equal(arr - obj3, "sub for me")
        assert_equal(obj2 - obj3, "sub for me")
        # obj3 is a subclass that defines __rmul__.  CPython calls it.
        assert_equal(arr * obj3, 321)
        # But not here, since obj3.__rmul__ is obj2.__rmul__.
        assert_equal(obj2 * obj3, 123)
        # And of course, here obj3.__mul__ should be called.
        assert_equal(obj3 * obj2, 123)
        # obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
        # (and both are just ndarray.__radd__); see #4815.
        res = obj2 + obj3
        assert_equal(res, 46)
        assert_(isinstance(res, SomeClass2))
        # Since obj3 is a subclass, it should have precedence, like CPython
        # would give, even though obj2 has __numpy_ufunc__ and __radd__.
        # See gh-4815 and gh-5747.
        res = obj3 + obj2
        assert_equal(res, 46)
        assert_(isinstance(res, SomeClass3))

    def test_ufunc_override_normalize_signature(self):
        # gh-5674: the 'sig' alias must be normalized to 'signature'
        # before reaching __numpy_ufunc__
        class SomeClass(object):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                return kw

        a = SomeClass()
        kw = np.add(a, [1])
        assert_('sig' not in kw and 'signature' not in kw)
        kw = np.add(a, [1], sig='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
        kw = np.add(a, [1], signature='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
    """Checks for the IsPythonScalar helper exposed by the C test module."""

    def test_IsPythonScalar(self):
        from numpy.core.multiarray_tests import IsPythonScalar
        # bytes, small int, big int, float and str all count as scalars
        for scalar in (b'foobar', 1, 2**80, 2., "a"):
            assert_(IsPythonScalar(scalar))
class TestSubscripting(TestCase):
    """Scalar vs. 0-d-array results of basic integer indexing."""

    def test_test_zero_rank(self):
        arr = array([1, 2, 3])
        # plain integer indexing yields a numpy scalar ...
        self.assertTrue(isinstance(arr[0], np.int_))
        if sys.version_info[0] < 3:
            # ... which on Python 2 is also a plain int
            self.assertTrue(isinstance(arr[0], int))
        # adding an ellipsis yields a 0-d ndarray instead
        self.assertTrue(type(arr[0, ...]) is ndarray)
class TestPickling(TestCase):
    # Round-trip pickling plus fixed byte-string fixtures that pin the
    # version-0 and version-1 ndarray pickle formats.

    def test_roundtrip(self):
        # dumps/loads round-trip for plain, transposed and structured arrays
        import pickle
        carray = array([[2, 9], [7, 0], [3, 8]])
        DATA = [
            carray,
            transpose(carray),
            array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
                                            ('c', float)])
        ]

        for a in DATA:
            assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)

    def _loads(self, obj):
        # Python 3 needs latin1 to decode Python-2-era pickle payloads
        if sys.version_info[0] >= 3:
            return loads(obj, encoding='latin1')
        else:
            return loads(obj)

    # version 0 pickles, using protocol=2 to pickle
    # version 0 doesn't have a version field
    def test_version0_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = array([1, 2, 3, 4], dtype=int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version0_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version0_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    # version 1 pickles, using protocol=2 to pickle
    def test_version1_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = array([1, 2, 3, 4], dtype=int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_subarray_int_shape(self):
        # pickles written before the subarray-shape fix must still load
        s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
class TestFancyIndexing(TestCase):
    """Selection and assignment through list, tuple, and boolean fancy indexes."""

    def test_list(self):
        arr = ones((1, 1))
        arr[:, [0]] = 2.0
        assert_array_equal(arr, array([[2.0]]))

        arr = ones((1, 1, 1))
        arr[:, :, [0]] = 2.0
        assert_array_equal(arr, array([[[2.0]]]))

    def test_tuple(self):
        arr = ones((1, 1))
        arr[:, (0,)] = 2.0
        assert_array_equal(arr, array([[2.0]]))

        arr = ones((1, 1, 1))
        arr[:, :, (0,)] = 2.0
        assert_array_equal(arr, array([[[2.0]]]))

    def test_mask(self):
        arr = array([1, 2, 3, 4])
        mask = array([0, 1, 0, 0], bool)
        assert_array_equal(arr[mask], array([2]))

    def test_mask2(self):
        arr = array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = array([0, 1], bool)
        full_mask = array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        single_mask = array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        # a 1-D mask selects whole rows
        assert_array_equal(arr[row_mask], array([[5, 6, 7, 8]]))
        # a full-shape mask selects individual elements
        assert_array_equal(arr[full_mask], array([2, 5]))
        assert_array_equal(arr[single_mask], array([2]))

    def test_assign_mask(self):
        arr = array([1, 2, 3, 4])
        mask = array([0, 1, 0, 0], bool)
        arr[mask] = 5
        assert_array_equal(arr, array([1, 5, 3, 4]))

    def test_assign_mask2(self):
        xorig = array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = array([0, 1], bool)
        full_mask = array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        single_mask = array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)

        arr = xorig.copy()
        arr[row_mask] = 10
        assert_array_equal(arr, array([[1, 2, 3, 4], [10, 10, 10, 10]]))

        arr = xorig.copy()
        arr[full_mask] = 10
        assert_array_equal(arr, array([[1, 10, 3, 4], [10, 6, 7, 8]]))

        arr = xorig.copy()
        arr[single_mask] = 10
        assert_array_equal(arr, array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = array(["This", "is", "example"])
g2 = array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = array([sixu("This"), sixu("is"), sixu("example")])
g2 = array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
    # (input, expected argmax index) pairs.  NaNs — real, or in either
    # complex component — must win; datetime64, timedelta and bool
    # orderings are covered as well.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),
        ([False, False, False, False, True], 4),
        ([False, False, False, True, False], 3),
        ([True, False, False, False, False], 0),
        ([True, False, True, False, False], 0),

        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 3),
        #(['zz', 'a', 'aa', 'a'], 0),
        #(['aa', 'z', 'zz', 'a'], 2),
    ]

    def test_all(self):
        # along every axis, the values picked by argmax must equal max
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amax = a.max(i)
            aargmax = a.argmax(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(all(amax == aargmax.choose(*a.transpose(i,*axes))))

    def test_combinations(self):
        for arr, pos in self.nan_arr:
            assert_equal(np.argmax(arr), pos, err_msg="%r"%arr)
            assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr)

    def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))

        out = np.ones(10, dtype=np.int_)
        a.argmax(-1, out=out)
        assert_equal(out, a.argmax(-1))

    def test_argmax_unicode(self):
        # a single non-empty element in a large unicode array must be found
        d = np.zeros(6031, dtype='<U9')
        d[5942] = "as"
        assert_equal(d.argmax(), 5942)

    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmax and numpy.argmax support out/axis args
        a = np.random.normal(size=(2,3))

        #check positional args
        out1 = zeros(2, dtype=int)
        out2 = zeros(2, dtype=int)
        assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
        assert_equal(out1, out2)

        #check keyword args
        out1 = zeros(3, dtype=int)
        out2 = zeros(3, dtype=int)
        assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
        assert_equal(out1, out2)
class TestArgmin(TestCase):
    # (input, expected argmin index) pairs — mirror of TestArgmax.nan_arr:
    # NaNs (real or in either complex component) must win here too.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 0),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2014-11-20T12:20:59'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 4),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 2),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 0),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 1),
        ([True, True, True, True, False], 4),
        ([True, True, True, False, True], 3),
        ([False, True, True, True, True], 0),
        ([False, True, False, True, True], 0),

        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 0),
        #(['zz', 'a', 'aa', 'a'], 1),
        #(['aa', 'z', 'zz', 'a'], 3),
    ]

    def test_all(self):
        # along every axis, the values picked by argmin must equal min
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amin = a.min(i)
            aargmin = a.argmin(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(all(amin == aargmin.choose(*a.transpose(i,*axes))))

    def test_combinations(self):
        for arr, pos in self.nan_arr:
            assert_equal(np.argmin(arr), pos, err_msg="%r"%arr)
            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr)

    def test_minimum_signed_integers(self):
        # the most-negative value of each signed dtype must be found
        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
        assert_equal(np.argmin(a), 1)

    def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))

        out = np.ones(10, dtype=np.int_)
        a.argmin(-1, out=out)
        assert_equal(out, a.argmin(-1))

    def test_argmin_unicode(self):
        # a single smaller element in a large unicode array must be found
        d = np.ones(6031, dtype='<U9')
        d[6001] = "0"
        assert_equal(d.argmin(), 6001)

    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmin and numpy.argmin support out/axis args
        a = np.random.normal(size=(2,3))

        #check positional args
        out1 = zeros(2, dtype=int)
        out2 = ones(2, dtype=int)
        assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
        assert_equal(out1, out2)

        #check keyword args
        out1 = zeros(3, dtype=int)
        out2 = ones(3, dtype=int)
        assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
        assert_equal(out1, out2)
class TestMinMax(TestCase):
    """amax/amin on 0-d input and axis validation."""

    def test_scalar(self):
        # a 0-d input has no axis 1, but axis=0 and axis=None are valid
        assert_raises(ValueError, np.amax, 1, 1)
        assert_raises(ValueError, np.amin, 1, 1)
        for axis in (0, None):
            assert_equal(np.amax(1, axis=axis), 1)
            assert_equal(np.amin(1, axis=axis), 1)

    def test_axis(self):
        # out-of-range axes must be rejected
        assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
class TestNewaxis(TestCase):
    """Broadcasting a scalar against a newaxis-expanded vector."""

    def test_basic(self):
        vec = array([0, -0.1, 0.1])
        scaled = 250 * vec[:, newaxis]
        # the (3, 1) result flattens back to 250 * vec
        assert_almost_equal(scaled.ravel(), 250 * vec)
class TestClip(TestCase):
    # ndarray.clip across dtype groups and byte orders, for record-array
    # fields, and with only min= or max= given.

    def _check_range(self, x, cmin, cmax):
        # every element must land inside [cmin, cmax]
        assert_(np.all(x >= cmin))
        assert_(np.all(x <= cmax))

    def _clip_type(self,type_group,array_max,
                   clip_min,clip_max,inplace=False,
                   expected_min=None,expected_max=None):
        if expected_min is None:
            expected_min = clip_min

        if expected_max is None:
            expected_max = clip_max

        for T in np.sctypes[type_group]:
            # exercise both the native and a byte-swapped dtype
            if sys.byteorder == 'little':
                byte_orders = ['=', '>']
            else:
                byte_orders = ['<', '=']

            for byteorder in byte_orders:
                dtype = np.dtype(T).newbyteorder(byteorder)

                x = (np.random.random(1000) * array_max).astype(dtype)
                if inplace:
                    # in-place clip preserves the (possibly swapped)
                    # byteorder of x, so keep the expected value as-is
                    x.clip(clip_min, clip_max, x)
                else:
                    x = x.clip(clip_min, clip_max)
                    # out-of-place clip returns a native-order array
                    byteorder = '='

                if x.dtype.byteorder == '|':
                    byteorder = '|'
                assert_equal(x.dtype.byteorder, byteorder)
                self._check_range(x, expected_min, expected_max)
        return x

    def test_basic(self):
        for inplace in [False, True]:
            self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace)
            self._clip_type('float', 1024, 0, 0, inplace=inplace)

            self._clip_type('int', 1024, -120, 100.5, inplace=inplace)
            self._clip_type('int', 1024, 0, 0, inplace=inplace)

            # unsigned types clamp the negative lower bound to 0
            x = self._clip_type('uint', 1024, -120, 100, expected_min=0,
                                inplace=inplace)
            x = self._clip_type('uint', 1024, 0, 0, inplace=inplace)

    def test_record_array(self):
        # clipping a single field of a structured array
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
        y = rec['x'].clip(-0.3, 0.5)
        self._check_range(y, -0.3, 0.5)

    def test_max_or_min(self):
        # clip with only one bound given
        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        x = val.clip(3)
        assert_(np.all(x >= 3))
        x = val.clip(min=3)
        assert_(np.all(x >= 3))
        x = val.clip(max=4)
        assert_(np.all(x <= 4))
class TestPutmask(object):
    """Generator-style (nose yield) tests for np.putmask across dtypes
    and byte orders."""

    def tst_basic(self, x, T, mask, val):
        # After putmask, every masked element equals val cast to T and
        # the dtype is unchanged.
        np.putmask(x, mask, val)
        assert_(np.all(x[mask] == T(val)))
        assert_(x.dtype == T)

    def test_ip_types(self):
        # Exercise every sctype except those putmask can't meaningfully
        # cast the test values to.
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(1000)*100
        mask = x < 40

        for val in [-100, 0, 15]:
            for types in np.sctypes.values():
                for T in types:
                    if T not in unchecked_types:
                        yield self.tst_basic, x.copy().astype(T), T, mask, val

    def test_mask_size(self):
        # The mask must match the array's size.
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)

    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        np.putmask(x, [True, False, True], -1)
        assert_array_equal(x, [-1, 2, -1])

    def test_ip_byteorder(self):
        # Big- and little-endian int32 must behave identically.
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        np.putmask(rec['x'], [True, False], 10)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [2, 4])
        assert_array_equal(rec['z'], [3, 3])
        np.putmask(rec['y'], [True, False], 11)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [11, 4])
        assert_array_equal(rec['z'], [3, 3])

    def test_masked_array(self):
        # Deliberately disabled; kept for reference.
        ## x = np.array([1,2,3])
        ## z = np.ma.array(x,mask=[True,False,False])
        ## np.putmask(z,[True,True,True],3)
        pass
class TestTake(object):
    """Generator-style tests for ndarray.take: dtypes, index modes and
    byte orders."""

    def tst_basic(self, x):
        # Taking every index along axis 0 reproduces the array.
        ind = list(range(x.shape[0]))
        assert_array_equal(x.take(ind, axis=0), x)

    def test_ip_types(self):
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        for types in np.sctypes.values():
            for T in types:
                if T not in unchecked_types:
                    yield self.tst_basic, x.copy().astype(T)

    def test_raise(self):
        # Default mode='raise': out-of-bounds indices raise, but
        # negative in-range indices are allowed.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, x.take, [-3], axis=0)
        assert_array_equal(x.take([-1], axis=0)[0], x[1])

    def test_clip(self):
        # mode='clip' saturates indices to the valid range.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])

    def test_wrap(self):
        # mode='wrap' wraps indices modulo the axis length.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])

    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        rec1 = rec.take([1])
        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
    """Tests for np.lexsort with list, stacked-array and datetime keys."""

    def test_basic(self):
        primary = [1, 2, 1, 3, 1, 5]
        secondary = [0, 4, 5, 6, 2, 3]
        expected = np.array([0, 4, 2, 1, 3, 5])
        # The last key in the sequence is the primary sort key.
        assert_array_equal(np.lexsort((secondary, primary)), expected)

        # A 2-D key array behaves like the tuple of its rows.
        stacked = np.vstack((secondary, primary))
        order = np.lexsort(stacked)
        assert_array_equal(order, expected)
        assert_array_equal(stacked[1][order], np.sort(stacked[1]))

    def test_datetime(self):
        # lexsort must handle datetime64 and timedelta64 keys alike.
        for kind in ('datetime64[D]', 'timedelta64[D]'):
            primary = np.array([0, 0, 0], dtype=kind)
            secondary = np.array([2, 1, 0], dtype=kind)
            assert_array_equal(np.lexsort((secondary, primary)),
                               np.array([2, 1, 0]))
class TestIO(object):
    """Test tofile, fromfile, tobytes, and fromstring"""

    def setUp(self):
        # Complex test data containing nan/inf specials, plus a fresh
        # temporary file path for every test.
        shape = (2, 4, 3)
        rand = np.random.random
        self.x = rand(shape) + rand(shape).astype(np.complex)*1j
        self.x[0,:, 1] = [nan, inf, -inf, nan]
        self.dtype = self.x.dtype
        self.tempdir = tempfile.mkdtemp()
        self.filename = tempfile.mktemp(dir=self.tempdir)

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_bool_fromstring(self):
        # Any nonzero numeric token parses as True.
        v = np.array([True, False, True, False], dtype=np.bool_)
        y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
        assert_array_equal(v, y)

    def test_uint64_fromstring(self):
        # Values above 2**63 must parse without overflow.
        d = np.fromstring("9923372036854775807 104783749223640",
                          dtype=np.uint64, sep=' ');
        e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
        assert_array_equal(d, e)

    def test_int64_fromstring(self):
        d = np.fromstring("-25041670086757 104783749223640",
                          dtype=np.int64, sep=' ');
        e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
        assert_array_equal(d, e)

    def test_empty_files_binary(self):
        # fromfile on an empty file yields an empty array, not an error.
        f = open(self.filename, 'w')
        f.close()
        y = fromfile(self.filename)
        assert_(y.size == 0, "Array not empty")

    def test_empty_files_text(self):
        f = open(self.filename, 'w')
        f.close()
        y = fromfile(self.filename, sep=" ")
        assert_(y.size == 0, "Array not empty")

    def test_roundtrip_file(self):
        f = open(self.filename, 'wb')
        self.x.tofile(f)
        f.close()
        # NB. doesn't work with flush+seek, due to use of C stdio
        f = open(self.filename, 'rb')
        y = np.fromfile(f, dtype=self.dtype)
        f.close()
        assert_array_equal(y, self.x.flat)

    def test_roundtrip_filename(self):
        self.x.tofile(self.filename)
        y = np.fromfile(self.filename, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)

    def test_roundtrip_binary_str(self):
        # tobytes/fromstring round-trip in both C and Fortran order.
        s = self.x.tobytes()
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)

        s = self.x.tobytes('F')
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flatten('F'))

    def test_roundtrip_str(self):
        x = self.x.real.ravel()
        s = "@".join(map(str, x))
        y = np.fromstring(s, sep="@")
        # NB. str imbues less precision
        nan_mask = ~np.isfinite(x)
        assert_array_equal(x[nan_mask], y[nan_mask])
        assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)

    def test_roundtrip_repr(self):
        # repr keeps full precision, so the round-trip is exact.
        x = self.x.real.ravel()
        s = "@".join(map(repr, x))
        y = np.fromstring(s, sep="@")
        assert_array_equal(x, y)

    def test_file_position_after_fromfile(self):
        # gh-4118: the Python-level file position must track what
        # fromfile consumed, at several buffer-size boundaries.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]

        for size in sizes:
            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.close()

            for mode in ['rb', 'r+b']:
                err_msg = "%d %s" % (size, mode)

                f = open(self.filename, mode)
                f.read(2)
                np.fromfile(f, dtype=np.float64, count=1)
                pos = f.tell()
                f.close()
                # 2 bytes read + one 8-byte float64.
                assert_equal(pos, 10, err_msg=err_msg)

    def test_file_position_after_tofile(self):
        # gh-4118
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]

        for size in sizes:
            err_msg = "%d" % (size,)

            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.seek(10)
            f.write(b'12')
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            # 10 (seek) + 2 (write) + 8 (one float64).
            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)

            f = open(self.filename, 'r+b')
            f.read(2)
            f.seek(0, 1)  # seek between read&write required by ANSI C
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10, err_msg=err_msg)

    def _check_from(self, s, value, **kw):
        # Parse `s` twice -- via fromstring and via a file round-trip --
        # and check both results against `value`.
        y = np.fromstring(asbytes(s), **kw)
        assert_array_equal(y, value)

        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, **kw)
        assert_array_equal(y, value)

    def test_nan(self):
        # Every spelling of NaN (with optional payload) must parse.
        self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
                         [nan, nan, nan, nan, nan, nan, nan],
                         sep=' ')

    def test_inf(self):
        # Every spelling and sign of infinity must parse.
        self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
                         [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')

    def test_numbers(self):
        self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
                         [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')

    def test_binary(self):
        # Raw little-endian float32 bytes for [1, 2, 3, 4].
        self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
                         array([1, 2, 3, 4]),
                         dtype='<f4')

    @dec.slow  # takes > 1 minute on mechanical hard drive
    def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            # Best effort: skip quietly when the host can't allocate 4GB+.
            pass

    def test_string(self):
        self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')

    def test_counted_string(self):
        # count= limits how many values are read; -1 means all.
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')

    def test_string_with_ws(self):
        self._check_from('1 2  3     4   ', [1, 2, 3, 4], dtype=int, sep=' ')

    def test_counted_string_with_ws(self):
        self._check_from('1 2  3     4   ', [1, 2, 3], count=3, dtype=int,
                         sep=' ')

    def test_ascii(self):
        # Whitespace around the separator is tolerated.
        self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')

    def test_malformed(self):
        # Parsing stops at the first token that doesn't fit the format.
        self._check_from('1.234 1,234', [1.234, 1.], sep=' ')

    def test_long_sep(self):
        # Multi-character separators are supported.
        self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')

    def test_dtype(self):
        v = np.array([1, 2, 3, 4], dtype=np.int_)
        self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)

    def test_dtype_bool(self):
        # can't use _check_from because fromstring can't handle True/False
        v = np.array([True, False, True, False], dtype=np.bool_)
        s = '1,0,-2.3,0'
        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
        assert_(y.dtype == '?')
        assert_array_equal(y, v)

    def test_tofile_sep(self):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.0,3.51,4.0')

    def test_tofile_format(self):
        # format= controls the textual representation of each element.
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',', format='%.2f')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.00,3.51,4.00')

    def test_locale(self):
        # Number parsing/formatting must be locale independent.
        in_foreign_locale(self.test_numbers)()
        in_foreign_locale(self.test_nan)()
        in_foreign_locale(self.test_inf)()
        in_foreign_locale(self.test_counted_string)()
        in_foreign_locale(self.test_ascii)()
        in_foreign_locale(self.test_malformed)()
        in_foreign_locale(self.test_tofile_sep)()
        in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
    """Generator-style tests for np.frombuffer round-trips."""

    def tst_basic(self, buffer, expected, kwargs):
        # Reinterpreting the raw bytes must reproduce the source values.
        assert_array_equal(np.frombuffer(buffer,**kwargs), expected)

    def test_ip_basic(self):
        # float/int/complex in both byte orders round-trip via tobytes.
        for order in '<>':
            for scalar in (float, int, np.complex):
                dt = np.dtype(scalar).newbyteorder(order)
                arr = (np.random.random((4, 7))*5).astype(dt)
                raw = arr.tobytes()
                yield self.tst_basic, raw, arr.flat, {'dtype':dt}

    def test_empty(self):
        # An empty buffer produces an empty array.
        yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
    """Tests for the .flat iterator on writeable, read-only and
    non-contiguous arrays."""

    def setUp(self):
        # a: read-only 4x5 array; b: non-contiguous read-only view of it.
        # a0/b0: writeable counterparts with the same shapes.
        a0 = arange(20.0)
        a = a0.reshape(4, 5)
        a0.shape = (4, 5)
        a.flags.writeable = False
        self.a = a
        self.b = a[::2, ::2]
        self.a0 = a0
        self.b0 = a0[::2, ::2]

    def test_contiguous(self):
        # Writing through .flat of a read-only array must raise and
        # leave the data untouched.
        testpassed = False
        try:
            self.a.flat[12] = 100.0
        except ValueError:
            testpassed = True
        assert testpassed
        assert self.a.flat[12] == 12.0

    def test_discontiguous(self):
        # Same guarantee for a non-contiguous read-only view.
        testpassed = False
        try:
            self.b.flat[4] = 100.0
        except ValueError:
            testpassed = True
        assert testpassed
        assert self.b.flat[4] == 12.0

    def test___array__(self):
        c = self.a.flat.__array__()
        d = self.b.flat.__array__()
        e = self.a0.flat.__array__()
        f = self.b0.flat.__array__()

        # Read-only sources yield read-only results; writeable sources
        # yield writeable results.
        assert c.flags.writeable is False
        assert d.flags.writeable is False
        assert e.flags.writeable is True
        assert f.flags.writeable is True

        # Only the writeable non-contiguous case needs a write-back
        # (UPDATEIFCOPY) buffer, whose base is the source array.
        assert c.flags.updateifcopy is False
        assert d.flags.updateifcopy is False
        assert e.flags.updateifcopy is False
        assert f.flags.updateifcopy is True
        assert f.base is self.b0
class TestResize(TestCase):
    """Tests for in-place ndarray.resize: data preservation, zero fill,
    reference checking and argument validation."""

    def test_basic(self):
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        x.resize((5, 5))
        # Original data is kept at the front; new cells are zero-filled.
        assert_array_equal(x.flat[:9],
                           np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x[9:].flat, 0)

    def test_check_reference(self):
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        # The extra reference held by y makes the refcheck fail, so
        # resize must refuse to reallocate.
        y = x
        self.assertRaises(ValueError, x.resize, (5, 1))

    def test_int_shape(self):
        # A bare integer is accepted as the new (1-D) shape.
        x = np.eye(3)
        x.resize(3)
        assert_array_equal(x, np.eye(3)[0,:])

    def test_none_shape(self):
        # resize(None) and resize() both leave the array unchanged.
        x = np.eye(3)
        x.resize(None)
        assert_array_equal(x, np.eye(3))
        x.resize()
        assert_array_equal(x, np.eye(3))

    def test_invalid_arguements(self):
        self.assertRaises(TypeError, np.eye(3).resize, 'hi')
        self.assertRaises(ValueError, np.eye(3).resize, -1)
        self.assertRaises(TypeError, np.eye(3).resize, order=1)
        self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')

    def test_freeform_shape(self):
        # The new shape may be given as separate integer arguments.
        x = np.eye(3)
        x.resize(3, 2, 1)
        assert_(x.shape == (3, 2, 1))

    def test_zeros_appended(self):
        x = np.eye(3)
        x.resize(2, 3, 3)
        assert_array_equal(x[0], np.eye(3))
        assert_array_equal(x[1], np.zeros((3, 3)))

    def test_obj_obj(self):
        # check memory is initialized on resize, gh-4857
        a = ones(10, dtype=[('k', object, 2)])
        a.resize(15,)
        assert_equal(a.shape, (15,))
        assert_array_equal(a['k'][-5:], 0)
        assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
    """Tests for structured (record) dtypes: field renaming, byte/unicode
    field names across Python 2/3, multi-field indexing and hashing."""

    def test_field_rename(self):
        dt = np.dtype([('f', float), ('i', int)])
        dt.names = ['p', 'q']
        assert_equal(dt.names, ['p', 'q'])

    if sys.version_info[0] >= 3:
        def test_bytes_fields(self):
            # Bytes are not allowed in field names and not recognized in titles
            # on Py3
            assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
            assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])

            dt = np.dtype([((asbytes('a'), 'b'), int)])
            assert_raises(ValueError, dt.__getitem__, asbytes('a'))
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            assert_raises(IndexError, x.__getitem__, asbytes('a'))
            y = x[0]
            assert_raises(IndexError, y.__getitem__, asbytes('a'))
    else:
        def test_unicode_field_titles(self):
            # Unicode field titles are added to field dict on Py2
            title = unicode('b')
            dt = np.dtype([((title, 'a'), int)])
            # The following lookups must simply not raise.
            dt[title]
            dt['a']
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            x[title]
            x['a']
            y = x[0]
            y[title]
            y['a']

        def test_unicode_field_names(self):
            # Unicode field names are not allowed on Py2
            title = unicode('b')
            assert_raises(TypeError, np.dtype, [(title, int)])
            assert_raises(TypeError, np.dtype, [(('a', title), int)])

    def test_field_names(self):
        # Test unicode and 8-bit / byte strings can be used
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        is_py3 = sys.version_info[0] >= 3
        if is_py3:
            funcs = (str,)
            # byte string indexing fails gracefully
            assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
            assert_raises(IndexError, a.__getitem__, asbytes('f1'))
            assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
            assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
        else:
            funcs = (str, unicode)
        for func in funcs:
            b = a.copy()
            fn1 = func('f1')
            b[fn1] = 1
            assert_equal(b[fn1], 1)
            fnn = func('not at all')
            # Unknown field names raise ValueError on the array ...
            assert_raises(ValueError, b.__setitem__, fnn, 1)
            assert_raises(ValueError, b.__getitem__, fnn)
            b[0][fn1] = 2
            assert_equal(b[fn1], 2)
            # ... and IndexError on a record scalar.
            assert_raises(IndexError, b[0].__setitem__, fnn, 1)
            assert_raises(IndexError, b[0].__getitem__, fnn)
            # Subfield
            fn3 = func('f3')
            sfn1 = func('sf1')
            b[fn3][sfn1] = 1
            assert_equal(b[fn3][sfn1], 1)
            assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
            assert_raises(ValueError, b[fn3].__getitem__, fnn)
            # multiple Subfields
            fn2 = func('f2')
            b[fn2] = 3
            assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
            assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
            # view of subfield view/copy
            assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
            view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])]
            assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
        # non-ascii unicode field indexing is well behaved
        if not is_py3:
            raise SkipTest('non ascii unicode field indexing skipped; '
                           'raises segfault on python 2.x')
        else:
            assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
            assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))

    def test_field_names_deprecation(self):
        # Multi-field indexing returns a copy; writing through it emits a
        # FutureWarning and must not modify the original array.

        def collect_warnings(f, *args, **kwargs):
            with warnings.catch_warnings(record=True) as log:
                warnings.simplefilter("always")
                f(*args, **kwargs)
            return [w.category for w in log]

        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        a['f1'][0] = 1
        a['f2'][0] = 2
        a['f3'][0] = (3,)
        b = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        b['f1'][0] = 1
        b['f2'][0] = 2
        b['f3'][0] = (3,)

        # All the different functions raise a warning, but not an error, and
        # 'a' is not modified:
        assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
                     [FutureWarning])
        assert_equal(a, b)

        # Views also warn
        subset = a[['f1', 'f2']]
        subset_view = subset.view()
        assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
                     [FutureWarning])
        # But the write goes through:
        assert_equal(subset['f1'][0], 10)
        # Only one warning per multiple field indexing, though (even if there
        # are multiple views involved):
        assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])

    def test_record_hash(self):
        # Read-only record scalars hash by value, regardless of whether
        # the fields are named or auto-generated.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        a.flags.writeable = False
        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
        b.flags.writeable = False
        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
        c.flags.writeable = False
        self.assertTrue(hash(a[0]) == hash(a[1]))
        self.assertTrue(hash(a[0]) == hash(b[0]))
        self.assertTrue(hash(a[0]) != hash(b[1]))
        self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])

    def test_record_no_hash(self):
        # Record scalars from a writeable array are unhashable.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        self.assertRaises(TypeError, hash, a[0])

    def test_empty_structure_creation(self):
        # make sure these do not raise errors (gh-5631)
        array([()], dtype={'names': [], 'formats': [],
                           'offsets': [], 'itemsize': 12})
        array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
                                           'offsets': [], 'itemsize': 12})
class TestView(TestCase):
    """Viewing a 4-byte structured array as a plain little-endian int32."""

    def test_basic(self):
        rgba = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
                        dtype=[('r', np.int8), ('g', np.int8),
                               ('b', np.int8), ('a', np.int8)])
        # Endianness must be explicit so the packed integers are
        # predictable; pass the dtype both by keyword and positionally.
        via_kwarg = rgba.view(dtype='<i4')
        via_pos = rgba.view('<i4')
        assert_array_equal(via_kwarg, via_pos)
        # 0x04030201 == 67305985 and 0x08070605 == 134678021.
        assert_array_equal(via_kwarg, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
    """Tests for mean/var/std via the _mean/_var/_std wrappers:
    keepdims, out=, result dtypes, ddof scaling, empty input and
    ndarray subclasses."""

    funcs = [_mean, _var, _std]

    def setUp(self):
        np.random.seed(range(3))
        self.rmat = np.random.random((4, 5))
        self.cmat = self.rmat + 1j * self.rmat
        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
        self.omat = self.omat.reshape(4, 5)

    def test_keepdims(self):
        # keepdims=True must keep the reduced axis with length 1.
        mat = np.eye(3)
        for f in self.funcs:
            for axis in [0, 1]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.ndim == mat.ndim)
                assert_(res.shape[axis] == 1)
            for axis in [None]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.shape == (1, 1))

    def test_out(self):
        # out= must receive the result; mismatched out shapes raise.
        mat = np.eye(3)
        for f in self.funcs:
            out = np.zeros(3)
            tgt = f(mat, axis=1)
            res = f(mat, axis=1, out=out)
            assert_almost_equal(res, out)
            assert_almost_equal(res, tgt)
            out = np.empty(2)
            assert_raises(ValueError, f, mat, axis=1, out=out)
            out = np.empty((2, 2))
            assert_raises(ValueError, f, mat, axis=1, out=out)

    def test_dtype_from_input(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']

        # object type: the input type is preserved.
        for f in self.funcs:
            mat = np.array([[Decimal(1)]*3]*3)
            tgt = mat.dtype.type
            res = f(mat, axis=1).dtype.type
            assert_(res is tgt)
            # scalar case
            res = type(f(mat, axis=None))
            assert_(res is Decimal)

        # integer types promote to float64.
        for f in self.funcs:
            for c in icodes:
                mat = np.eye(3, dtype=c)
                tgt = np.float64
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

        # mean for float types: input dtype preserved.
        for f in [_mean]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                tgt = mat.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

        # var, std for float types: complex inputs give real results.
        for f in [_var, _std]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                # deal with complex types
                tgt = mat.real.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_dtype(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        mat = np.eye(3)

        # stats for integer types
        # fixme:
        # this needs definition as there are lots places along the line
        # where type casting may take place.
        #for f in self.funcs:
            #for c in icodes:
                #tgt = np.dtype(c).type
                #res = f(mat, axis=1, dtype=c).dtype.type
                #assert_(res is tgt)
                ## scalar case
                #res = f(mat, axis=None, dtype=c).dtype.type
                #assert_(res is tgt)

        # stats for float types: an explicit dtype= wins.
        for f in self.funcs:
            for c in fcodes:
                tgt = np.dtype(c).type
                res = f(mat, axis=1, dtype=c).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None, dtype=c).dtype.type
                assert_(res is tgt)

    def test_ddof(self):
        # var scales as 1/(n - ddof) and std as 1/sqrt(n - ddof); undo
        # the scaling and compare against the default (ddof=0) result.
        # BUG FIX: the assertions used to sit after both loops, so only
        # the final ddof value was ever checked (twice); they now run
        # once per ddof.
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= n produces a RuntimeWarning and a non-negative result.
        dim = self.rmat.shape[1]
        for f in [_var, _std]:
            for ddof in range(dim, dim + 2):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    res = f(self.rmat, axis=1, ddof=ddof)
                    assert_(not (res < 0).any())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        # Reducing an empty axis yields nan (with a RuntimeWarning);
        # reducing the non-empty axis of a (0, 3) array yields empty.
        A = np.zeros((0, 3))
        for f in self.funcs:
            for axis in [0, None]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(A, axis=axis)).all())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(A, axis=axis), np.zeros([]))

    def test_mean_values(self):
        # mean * number-of-elements == sum.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * mat.shape[axis]
                assert_almost_equal(res, tgt)
            for axis in [None]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * np.prod(mat.shape)
                assert_almost_equal(res, tgt)

    def test_var_values(self):
        # var == E[|x|^2] - |E[x]|^2.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                msqr = _mean(mat * mat.conj(), axis=axis)
                mean = _mean(mat, axis=axis)
                tgt = msqr - mean * mean.conjugate()
                res = _var(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_std_values(self):
        # std == sqrt(var).
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                tgt = np.sqrt(_var(mat, axis=axis))
                res = _std(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_subclass(self):
        # The statistics methods must preserve ndarray subclasses and
        # their attributes (via __array_finalize__).
        class TestArray(np.ndarray):
            def __new__(cls, data, info):
                result = np.array(data)
                result = result.view(cls)
                result.info = info
                return result

            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')

        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        res = dat.mean(1)
        assert_(res.info == dat.info)
        res = dat.std(1)
        assert_(res.info == dat.info)
        res = dat.var(1)
        assert_(res.info == dat.info)
class TestVdot(TestCase):
    """Tests for np.vdot: scalar results, dtype coverage and memory order."""

    def test_basic(self):
        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        dt_complex = np.typecodes['Complex']

        # vdot conjugates its first argument, so the self-product of both
        # the real identity and the purely imaginary identity is 3.
        for base, codes in ((np.eye(3), dt_numeric + 'O'),
                            (np.eye(3) * 1j, dt_complex + 'O')):
            for dt in codes:
                b = base.astype(dt)
                res = np.vdot(b, b)
                assert_(np.isscalar(res))
                assert_equal(np.vdot(b, b), 3)

        # Boolean identity: any matching True pair gives True.
        b = np.eye(3, dtype=np.bool)
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), True)

    def test_vdot_array_order(self):
        c_ord = array([[1, 2], [3, 4]], order='C')
        f_ord = array([[1, 2], [3, 4]], order='F')
        res = np.vdot(c_ord, c_ord)

        # Integer arrays are exact, so mixed memory orders must agree
        # exactly with the C/C result.
        assert_equal(np.vdot(c_ord, f_ord), res)
        assert_equal(np.vdot(f_ord, c_ord), res)
        assert_equal(np.vdot(f_ord, f_ord), res)
class TestDot(TestCase):
    """Tests for np.dot: 2- and 3-argument forms, out= validation, memory
    order, object arrays, __numpy_ufunc__ override dispatch, and the OSX
    Accelerate sgemv alignment workaround."""

    def test_dot_2args(self):
        from numpy.core.multiarray import dot

        a = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([[1, 0], [1, 1]], dtype=float)
        c = np.array([[3, 2], [7, 4]], dtype=float)

        d = dot(a, b)
        assert_allclose(c, d)

    def test_dot_3args(self):
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 32))
        for i in range(12):
            dot(f, v, r)
        # Repeated calls must not leak references to the out array.
        assert_equal(sys.getrefcount(r), 2)
        r2 = dot(f, v, out=None)
        assert_array_equal(r2, r)
        # With out= given, the very same array object is returned.
        assert_(r is dot(f, v, out=r))

        v = v[:, 0].copy()  # v.shape == (16,)
        r = r[:, 0].copy()  # r.shape == (1024,)
        r2 = dot(f, v)
        assert_(r is dot(f, v, r))
        assert_array_equal(r2, r)

    def test_dot_3args_errors(self):
        # out= must match the result in shape, dtype and layout.
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 31))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((1024,))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((32,))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((32, 1024))
        assert_raises(ValueError, dot, f, v, r)
        assert_raises(ValueError, dot, f, v, r.T)

        r = np.empty((1024, 64))
        assert_raises(ValueError, dot, f, v, r[:, ::2])
        assert_raises(ValueError, dot, f, v, r[:, :32])

        r = np.empty((1024, 32), dtype=np.float32)
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((1024, 32), dtype=int)
        assert_raises(ValueError, dot, f, v, r)

    def test_dot_array_order(self):
        a = array([[1, 2], [3, 4]], order='C')
        b = array([[1, 2], [3, 4]], order='F')
        res = np.dot(a, a)

        # integer arrays are exact
        assert_equal(np.dot(a, b), res)
        assert_equal(np.dot(b, a), res)
        assert_equal(np.dot(b, b), res)

    def test_dot_scalar_and_matrix_of_objects(self):
        # Ticket #2469
        arr = np.matrix([1, 2], dtype=object)
        desired = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.dot(arr, 3), desired)
        assert_equal(np.dot(3, arr), desired)

    def test_dot_override(self):
        # __numpy_ufunc__ on an operand takes over the computation; a
        # lone NotImplemented handler makes the call raise TypeError.
        class A(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A()
        b = B()
        c = np.array([[1]])

        assert_equal(np.dot(a, b), "A")
        assert_equal(c.dot(a), "A")
        assert_raises(TypeError, np.dot, b, c)
        assert_raises(TypeError, c.dot, b)

    def test_accelerate_framework_sgemv_fix(self):
        # Regression test for a segfault in Accelerate's sgemv with
        # unaligned or strided float32 inputs; only relevant on OSX.

        if sys.platform != 'darwin':
            return

        def aligned_array(shape, align, dtype, order='C'):
            # Allocate an array whose data pointer has the requested
            # byte alignment.
            d = dtype()
            N = np.prod(shape)
            tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
            address = tmp.__array_interface__["data"][0]
            for offset in range(align):
                if (address + offset) % align == 0:
                    break
            tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
            return tmp.reshape(shape, order=order)

        def as_aligned(arr, align, dtype, order='C'):
            # Copy `arr` into a freshly aligned array.
            aligned = aligned_array(arr.shape, align, dtype, order)
            aligned[:] = arr[:]
            return aligned

        def assert_dot_close(A, X, desired):
            assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)

        m = aligned_array(100, 15, np.float32)
        s = aligned_array((100, 100), 15, np.float32)
        np.dot(s, m)  # this will always segfault if the bug is present

        testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
        for align, m, n, a_order in testdata:
            # Calculation in double precision
            A_d = np.random.rand(m, n)
            X_d = np.random.rand(n)
            desired = np.dot(A_d, X_d)
            # Calculation with aligned single precision
            A_f = as_aligned(A_d, align, np.float32, order=a_order)
            X_f = as_aligned(X_d, align, np.float32)
            assert_dot_close(A_f, X_f, desired)
            # Strided A rows
            A_d_2 = A_d[::2]
            desired = np.dot(A_d_2, X_d)
            A_f_2 = A_f[::2]
            assert_dot_close(A_f_2, X_f, desired)
            # Strided A columns, strided X vector
            A_d_22 = A_d_2[:, ::2]
            X_d_2 = X_d[::2]
            desired = np.dot(A_d_22, X_d_2)
            A_f_22 = A_f_2[:, ::2]
            X_f_2 = X_f[::2]
            assert_dot_close(A_f_22, X_f_2, desired)
            # Check the strides are as expected
            if a_order == 'F':
                assert_equal(A_f_22.strides, (8, 8 * m))
            else:
                assert_equal(A_f_22.strides, (8 * n, 8))
            assert_equal(X_f_2.strides, (8,))
            # Strides in A rows + cols only
            X_f_2c = as_aligned(X_f_2, align, np.float32)
            assert_dot_close(A_f_22, X_f_2c, desired)
            # Strides just in A cols
            A_d_12 = A_d[:, ::2]
            desired = np.dot(A_d_12, X_d_2)
            A_f_12 = A_f[:, ::2]
            assert_dot_close(A_f_12, X_f_2c, desired)
            # Strides in A cols and X
            assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
    # Every incompatible shape pairing must raise ValueError for
    # every supported dtype.
    bad_shapes = [
        ((1,), (2,)),            # mismatched vector vector
        ((2, 1,), (2,)),         # mismatched matrix vector
        ((2,), (1, 2)),          # mismatched vector matrix
        ((1, 2), (3, 1)),        # mismatched matrix matrix
        ((1,), ()),              # vector scalar
        ((), (1)),               # scalar vector
        ((1, 1), ()),            # matrix scalar
        ((), (1, 1)),            # scalar matrix
        ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
        ]
    for dt, (shape1, shape2) in itertools.product(self.types, bad_shapes):
        lhs = np.ones(shape1, dtype=dt)
        rhs = np.ones(shape2, dtype=dt)
        assert_raises(ValueError, self.matmul, lhs, rhs)
def test_shapes(self):
    # The stacked-matrix dimension broadcasts from either side.
    broadcastable = [
        ((1, 1), (2, 1, 1)),     # broadcast first argument
        ((2, 1, 1), (1, 1)),     # broadcast second argument
        ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
        ]
    for dt, (shape1, shape2) in itertools.product(self.types, broadcastable):
        lhs = np.ones(shape1, dtype=dt)
        rhs = np.ones(shape2, dtype=dt)
        prod = self.matmul(lhs, rhs)
        assert_(prod.shape == (2, 1, 1))

    # vector vector returns scalars.
    for dt in self.types:
        lhs = np.ones((2,), dtype=dt)
        rhs = np.ones((2,), dtype=dt)
        scalar = self.matmul(lhs, rhs)
        assert_(np.array(scalar).shape == ())
def test_result_types(self):
    # The result dtype must match the operand dtype for every type code.
    # NOTE(review): this method calls the module-level `matmul` and
    # `dtype` names instead of self.matmul -- presumably bound at file
    # scope; confirm against the file's imports.
    mat = np.ones((1,1))
    vec = np.ones((1,))
    for dt in self.types:
        m = mat.astype(dt)
        v = vec.astype(dt)
        for arg in [(m, v), (v, m), (m, m)]:
            res = matmul(*arg)
            assert_(res.dtype == dt)

        # vector vector returns scalars
        res = matmul(v, v)
        assert_(type(res) is dtype(dt).type)
def test_vector_vector_values(self):
    # 1*1 + 2*2 == 5 for every non-boolean dtype.
    base = np.array([1, 2])
    expected = 5
    for dt in self.types[1:]:
        cast = base.astype(dt)
        assert_equal(matmul(cast, cast), expected)

    # Booleans reduce with logical arithmetic: True@True -> True.
    flags = np.array([True, True], dtype='?')
    assert_equal(matmul(flags, flags), True)
def test_vector_matrix_values(self):
    # vector @ matrix and vector @ stacked-matrix, for numeric dtypes.
    vec = np.array([1, 2])
    mat1 = np.array([[1, 2], [3, 4]])
    mat2 = np.stack([mat1]*2, axis=0)
    tgt1 = np.array([7, 10])
    tgt2 = np.stack([tgt1]*2, axis=0)
    for dt in self.types[1:]:
        v = vec.astype(dt)
        m1 = mat1.astype(dt)
        m2 = mat2.astype(dt)
        res = matmul(v, m1)
        assert_equal(res, tgt1)
        res = matmul(v, m2)
        assert_equal(res, tgt2)

    # boolean type
    vec = np.array([True, False])
    mat1 = np.array([[True, False], [False, True]])
    mat2 = np.stack([mat1]*2, axis=0)
    tgt1 = np.array([True, False])
    tgt2 = np.stack([tgt1]*2, axis=0)

    res = matmul(vec, mat1)
    assert_equal(res, tgt1)
    res = matmul(vec, mat2)
    assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = matmul(m1, v)
assert_equal(res, tgt1)
res = matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = matmul(vec, mat1)
assert_equal(res, tgt1)
res = matmul(vec, mat2)
assert_equal(res, tgt2)
    def test_matrix_matrix_values(self):
        """2-D @ 2-D and stacked (3-D) operand combinations.

        Covers plain matrix products, one matrix broadcast against a
        stack, and stack @ stack -- first for every non-bool dtype in
        ``self.types``, then for booleans, where matmul reduces with
        logical and/or.
        """
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.array([[1, 0], [1, 1]])
        mat12 = np.stack([mat1, mat2], axis=0)
        mat21 = np.stack([mat2, mat1], axis=0)
        # tgtAB is the precomputed product matA @ matB
        tgt11 = np.array([[7, 10], [15, 22]])
        tgt12 = np.array([[3, 2], [7, 4]])
        tgt21 = np.array([[1, 2], [4, 6]])
        # tgt22 is computed for completeness but never asserted below
        tgt22 = np.array([[1, 0], [2, 1]])
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        for dt in self.types[1:]:
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            m12 = mat12.astype(dt)
            m21 = mat21.astype(dt)
            # matrix @ matrix
            res = matmul(m1, m2)
            assert_equal(res, tgt12)
            res = matmul(m2, m1)
            assert_equal(res, tgt21)
            # stacked @ matrix
            res = self.matmul(m12, m1)
            assert_equal(res, tgt11_21)
            # matrix @ stacked
            res = self.matmul(m1, m12)
            assert_equal(res, tgt11_12)
            # stacked @ stacked
            res = self.matmul(m12, m21)
            assert_equal(res, tgt12_21)
        # boolean type: same layout of checks with logical arithmetic
        m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
        m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
        m12 = np.stack([m1, m2], axis=0)
        m21 = np.stack([m2, m1], axis=0)
        tgt11 = m1
        tgt12 = m1
        tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
        # tgt22 again unused below
        tgt22 = m2
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        # matrix @ matrix
        res = matmul(m1, m2)
        assert_equal(res, tgt12)
        res = matmul(m2, m1)
        assert_equal(res, tgt21)
        # stacked @ matrix
        res = self.matmul(m12, m1)
        assert_equal(res, tgt11_21)
        # matrix @ stacked
        res = self.matmul(m1, m12)
        assert_equal(res, tgt11_12)
        # stacked @ stacked
        res = self.matmul(m12, m21)
        assert_equal(res, tgt12_21)
    def test_numpy_ufunc_override(self):
        """__numpy_ufunc__ dispatch: an override wins; NotImplemented defers.

        NOTE(review): __numpy_ufunc__ was an experimental development-era
        protocol (later superseded by __array_ufunc__); this test targets
        the NumPy build that shipped it.
        """
        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                # unconditionally claims the operation
                return "A"
        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                # always defers to the other operand
                return NotImplemented
        a = A([1, 2])
        b = B([1, 2])
        c = ones(2)
        # A's override wins regardless of operand order
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        # B defers and the plain ndarray has no override -> TypeError
        assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
    """Runs the MatmulCommon checks through the np.matmul callable."""
    matmul = np.matmul

    def test_out_arg(self):
        """np.matmul honours ``out=`` positionally and as a keyword.

        Fix: ``np.float`` (an alias of the builtin ``float``, deprecated in
        NumPy 1.20 and removed in 1.24) is replaced by ``np.float64``, the
        dtype the alias always resolved to -- behavior is unchanged.
        """
        a = np.ones((2, 2), dtype=np.float64)
        b = np.ones((2, 2), dtype=np.float64)
        tgt = np.full((2, 2), 2, dtype=np.float64)

        # test as positional argument
        msg = "out positional argument"
        out = np.zeros((2, 2), dtype=np.float64)
        self.matmul(a, b, out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test as keyword argument
        msg = "out keyword argument"
        out = np.zeros((2, 2), dtype=np.float64)
        self.matmul(a, b, out=out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test out with not allowed type cast (safe casting)
        # einsum and cblas raise different error types, so
        # use Exception.
        msg = "out argument with illegal cast"
        out = np.zeros((2, 2), dtype=np.int32)
        assert_raises(Exception, self.matmul, a, b, out=out)

        # skip following tests for now, cblas does not allow non-contiguous
        # outputs and consistency with dot would require same type,
        # dimensions, subtype, and c_contiguous.

        # test out with allowed type cast
        # msg = "out argument with allowed cast"
        # out = np.zeros((2, 2), dtype=np.complex128)
        # self.matmul(a, b, out=out)
        # assert_array_equal(out, tgt, err_msg=msg)

        # test out non-contiguous
        # msg = "out argument with non-contiguous layout"
        # c = np.zeros((2, 2, 2), dtype=np.float64)
        # self.matmul(a, b, out=c[..., 0])
        # assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
    # The @ operator only exists on Python 3.5+ (PEP 465), so this class
    # can only be defined conditionally.
    class TestMatmulOperator(MatmulCommon, TestCase):
        # bind operator.matmul itself as the callable under test
        from operator import matmul
        def test_array_priority_override(self):
            """An object with high __array_priority__ wins @ dispatch."""
            class A(object):
                __array_priority__ = 1000
                def __matmul__(self, other):
                    return "A"
                def __rmatmul__(self, other):
                    return "A"
            a = A()
            b = ones(2)
            # A handles the operation from either side
            assert_equal(self.matmul(a, b), "A")
            assert_equal(self.matmul(b, a), "A")
class TestInner(TestCase):
    """Regression tests for np.inner corner cases."""

    def test_inner_scalar_and_matrix_of_objects(self):
        # Ticket #4482: inner product of a scalar with an object matrix,
        # with the scalar on either side.
        mat = np.matrix([1, 2], dtype=object)
        want = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.inner(mat, 3), want)
        assert_equal(np.inner(3, mat), want)

    def test_vecself(self):
        # Ticket #844: inner product of a vector with itself used to
        # segfault or produce a meaningless result.
        vec = zeros(shape=(1, 80), dtype=float64)
        assert_almost_equal(inner(vec, vec), 0, decimal=14)
class TestSummarization(TestCase):
    """str()/repr() summarization ('...') of arrays above the print threshold.

    NOTE(review): the expected strings encode this NumPy version's legacy
    array-printing layout (including the ',' after '...'); they are
    formatting-sensitive and differ under later printing defaults.
    """
    def test_1d(self):
        A = np.arange(1001)
        strA = '[ 0 1 2 ..., 998 999 1000]'
        assert_(str(A) == strA)
        reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
        assert_(repr(A) == reprA)
    def test_2d(self):
        A = np.arange(1002).reshape(2, 501)
        strA = '[[ 0 1 2 ..., 498 499 500]\n' \
               ' [ 501 502 503 ..., 999 1000 1001]]'
        assert_(str(A) == strA)
        reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
                ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
        assert_(repr(A) == reprA)
class TestChoose(TestCase):
    """np.choose with matching and broadcasting choice arrays."""

    def setUp(self):
        # constant choice arrays (flat and 2-d) plus an index vector
        self.x = 2*ones((3,), dtype=int)
        self.y = 3*ones((3,), dtype=int)
        self.x2 = 2*ones((2, 3), dtype=int)
        self.y2 = 3*ones((2, 3), dtype=int)
        self.ind = [0, 0, 1]

    def test_basic(self):
        # both choices 1-d: plain element selection
        assert_equal(np.choose(self.ind, (self.x, self.y)), [2, 2, 3])

    def test_broadcast1(self):
        # 1-d index broadcast against 2-d choices
        assert_equal(np.choose(self.ind, (self.x2, self.y2)),
                     [[2, 2, 3], [2, 2, 3]])

    def test_broadcast2(self):
        # mixed 1-d and 2-d choices broadcast together
        assert_equal(np.choose(self.ind, (self.x, self.y2)),
                     [[2, 2, 3], [2, 2, 3]])

    # TODO: test for multidimensional
# Padding-mode codes understood by the C-level neighborhood-iterator helpers.
NEIGH_MODE = {
    'zero': 0,
    'one': 1,
    'constant': 2,
    'circular': 3,
    'mirror': 4,
}
class TestNeighborhoodIter(TestCase):
    """Tests for the C-level neighborhood iterator helper.

    Each ``_test_*`` helper runs once with a float dtype and once with
    ``Decimal`` to cover both numeric and object arrays.

    Fix: ``np.float`` (an alias of the builtin ``float``, deprecated in
    NumPy 1.20 and removed in 1.24) is replaced by ``np.float64``, the
    dtype the alias always resolved to -- behavior is unchanged.
    """
    # Simple, 2d tests
    def _test_simple2d(self, dt):
        # Test zero, one, and constant padding for simple data type
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
                                       NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple2d(self):
        self._test_simple2d(np.float64)

    def test_simple2d_object(self):
        self._test_simple2d(Decimal)

    def _test_mirror2d(self, dt):
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    def test_mirror2d(self):
        self._test_mirror2d(np.float64)

    def test_mirror2d_object(self):
        self._test_mirror2d(Decimal)

    # Simple, 1d tests
    def _test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple_float(self):
        self._test_simple(np.float64)

    def test_simple_object(self):
        self._test_simple(Decimal)

    # Test mirror modes
    def _test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        # every neighborhood must come back in the requested dtype
        self.assertTrue([i.dtype == dt for i in l])
        assert_array_equal(l, r)

    def test_mirror(self):
        self._test_mirror(np.float64)

    def test_mirror_object(self):
        self._test_mirror(Decimal)

    # Circular mode
    def _test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    def test_circular(self):
        self._test_circular(np.float64)

    def test_circular_object(self):
        self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
    """Stacked neighborhood iterators: an inner iterator running over the
    output of an outer one, mixing the padding modes zero/one/mirror/circular.

    All expected values are hand-computed for x = [1, 2, 3].
    """
    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
                                           [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)
    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of circular
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        # Stacking circular on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
class TestWarnings(object):
    """Warning-raising behaviour of array assignment."""

    def test_complex_warning(self):
        # Assigning complex values into an integer array must raise
        # ComplexWarning (escalated to an error here) and must leave the
        # destination array untouched.
        dst = np.array([1, 2])
        src = np.array([1-2j, 1+2j])
        with warnings.catch_warnings():
            warnings.simplefilter("error", np.ComplexWarning)
            assert_raises(np.ComplexWarning, dst.__setitem__, slice(None), src)
            assert_equal(dst, [1, 2])
class TestMinScalarType(object):
    """np.min_scalar_type reports the smallest dtype that can hold a value."""

    def _assert_min_type(self, value, expected):
        # the minimal scalar type must match the expected dtype exactly
        assert_equal(np.dtype(expected), np.min_scalar_type(value))

    def test_usigned_shortshort(self):
        self._assert_min_type(2**8-1, 'uint8')

    def test_usigned_short(self):
        self._assert_min_type(2**16-1, 'uint16')

    def test_usigned_int(self):
        self._assert_min_type(2**32-1, 'uint32')

    def test_usigned_longlong(self):
        self._assert_min_type(2**63-1, 'uint64')

    def test_object(self):
        # too big for any fixed-width integer -> falls back to object dtype
        self._assert_min_type(2**64, 'O')
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
    """Conversion of PEP 3118 (buffer protocol) format strings to dtypes.

    Each check feeds a format spec through numpy.core._internal's
    _dtype_from_pep3118 and compares against an explicitly built dtype.
    """
    def _check(self, spec, wanted):
        # Assert that parsing `spec` yields the dtype described by `wanted`.
        dt = np.dtype(wanted)
        if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
            if wanted[-1][0] == '':
                # trailing padding fields are unnamed; mirror that in `dt`
                names = list(dt.names)
                names[-1] = ''
                dt.names = tuple(names)
        assert_equal(_dtype_from_pep3118(spec), dt,
                     err_msg="spec %r != dtype %r" % (spec, wanted))
    def test_native_padding(self):
        # Native ('@') mode inserts alignment padding; standard ('=') must not
        align = np.dtype('i').alignment
        for j in range(8):
            if j == 0:
                s = 'bi'
            else:
                s = 'b%dxi' % j
            self._check('@'+s, {'f0': ('i1', 0),
                                'f1': ('i', align*(1 + j//align))})
            self._check('='+s, {'f0': ('i1', 0),
                                'f1': ('i', 1+j)})
    def test_native_padding_2(self):
        # Native padding should work also for structs and sub-arrays
        self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
        self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
    def test_trailing_padding(self):
        # Trailing padding should be included, *and*, the item size
        # should match the alignment if in aligned mode
        align = np.dtype('i').alignment
        def VV(n):
            # void type sized up to the next alignment boundary past n bytes
            return 'V%d' % (align*(1 + (n-1)//align))
        self._check('ix', [('f0', 'i'), ('', VV(1))])
        self._check('ixx', [('f0', 'i'), ('', VV(2))])
        self._check('ixxx', [('f0', 'i'), ('', VV(3))])
        self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
        self._check('i7x', [('f0', 'i'), ('', VV(7))])
        # '^' (native, unaligned): padding bytes are kept exactly as written
        self._check('^ix', [('f0', 'i'), ('', 'V1')])
        self._check('^ixx', [('f0', 'i'), ('', 'V2')])
        self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
        self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
        self._check('^i7x', [('f0', 'i'), ('', 'V7')])
    def test_native_padding_3(self):
        # aligned struct containing an unaligned sub-struct
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'),
                 ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
        # unaligned struct containing an aligned sub-struct
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                 ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
    def test_padding_with_array_inside_struct(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
                 ('d', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
    def test_byteorder_inside_struct(self):
        # The byte order after @T{=i} should be '=', not '@'.
        # Check this by noting the absence of native alignment.
        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
                                 'f1': ('i', 5)})
    def test_intra_padding(self):
        # Natively aligned sub-arrays may require some internal padding
        align = np.dtype('i').alignment
        def VV(n):
            return 'V%d' % (align*(1 + (n-1)//align))
        self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
    """ndarray <-> memoryview round-trips through the PEP 3118 buffer protocol.

    NOTE(review): `EMPTY` and `asbytes` are module-level names defined
    outside this view (presumably an empty suboffsets/strides sentinel and
    numpy.compat.asbytes); confirm before relying on them elsewhere.
    """
    def _check_roundtrip(self, obj):
        # Export `obj` as a memoryview and re-import it; both np.asarray
        # (view, no copy) and np.array (copy) must reproduce dtype, shape
        # and contents.
        obj = np.asarray(obj)
        x = memoryview(obj)
        y = np.asarray(x)
        y2 = np.array(x)
        assert_(not y.flags.owndata)
        assert_(y2.flags.owndata)
        assert_equal(y.dtype, obj.dtype)
        assert_equal(y.shape, obj.shape)
        assert_array_equal(obj, y)
        assert_equal(y2.dtype, obj.dtype)
        assert_equal(y2.shape, obj.shape)
        assert_array_equal(obj, y2)
    def test_roundtrip(self):
        # simple, 2-d, and non-contiguous arrays
        x = np.array([1, 2, 3, 4, 5], dtype='i4')
        self._check_roundtrip(x)
        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
        self._check_roundtrip(x)
        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
        self._check_roundtrip(x)
        # a structured dtype covering every buffer-exportable scalar kind
        dt = [('a', 'b'),
              ('b', 'h'),
              ('c', 'i'),
              ('d', 'l'),
              ('dx', 'q'),
              ('e', 'B'),
              ('f', 'H'),
              ('g', 'I'),
              ('h', 'L'),
              ('hx', 'Q'),
              ('i', np.single),
              ('j', np.double),
              ('k', np.longdouble),
              ('ix', np.csingle),
              ('jx', np.cdouble),
              ('kx', np.clongdouble),
              ('l', 'S4'),
              ('m', 'U4'),
              ('n', 'V3'),
              ('o', '?'),
              ('p', np.half),
              ]
        x = np.array(
                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                  asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
                dtype=dt)
        self._check_roundtrip(x)
        # sub-array field
        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
        self._check_roundtrip(x)
        # explicit byte orders
        x = np.array([1, 2, 3], dtype='>i2')
        self._check_roundtrip(x)
        x = np.array([1, 2, 3], dtype='<i2')
        self._check_roundtrip(x)
        x = np.array([1, 2, 3], dtype='>i4')
        self._check_roundtrip(x)
        x = np.array([1, 2, 3], dtype='<i4')
        self._check_roundtrip(x)
        # check long long can be represented as non-native
        x = np.array([1, 2, 3], dtype='>q')
        self._check_roundtrip(x)
        # Native-only data types can be passed through the buffer interface
        # only in native byte order
        if sys.byteorder == 'little':
            x = np.array([1, 2, 3], dtype='>g')
            assert_raises(ValueError, self._check_roundtrip, x)
            x = np.array([1, 2, 3], dtype='<g')
            self._check_roundtrip(x)
        else:
            x = np.array([1, 2, 3], dtype='>g')
            self._check_roundtrip(x)
            x = np.array([1, 2, 3], dtype='<g')
            assert_raises(ValueError, self._check_roundtrip, x)
    def test_roundtrip_half(self):
        # float16 values covering normals, subnormals, zeros and infinities
        half_list = [
            1.0,
            -2.0,
            6.5504 * 10**4, # (max half precision)
            2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
            2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
            0.0,
            -0.0,
            float('+inf'),
            float('-inf'),
            0.333251953125, # ~= 1/3
            ]
        x = np.array(half_list, dtype='>e')
        self._check_roundtrip(x)
        x = np.array(half_list, dtype='<e')
        self._check_roundtrip(x)
    def test_roundtrip_single_types(self):
        # every registered scalar type, in native and both forced byte orders
        # NOTE(review): np.typeDict is this version's alias registry
        for typ in np.typeDict.values():
            dtype = np.dtype(typ)
            if dtype.char in 'Mm':
                # datetimes cannot be used in buffers
                continue
            if dtype.char == 'V':
                # skip void
                continue
            x = np.zeros(4, dtype=dtype)
            self._check_roundtrip(x)
            if dtype.char not in 'qQgG':
                dt = dtype.newbyteorder('<')
                x = np.zeros(4, dtype=dt)
                self._check_roundtrip(x)
                dt = dtype.newbyteorder('>')
                x = np.zeros(4, dtype=dt)
                self._check_roundtrip(x)
    def test_roundtrip_scalar(self):
        # Issue #4015.
        self._check_roundtrip(0)
    def test_export_simple_1d(self):
        x = np.array([1, 2, 3, 4, 5], dtype='i')
        y = memoryview(x)
        assert_equal(y.format, 'i')
        assert_equal(y.shape, (5,))
        assert_equal(y.ndim, 1)
        assert_equal(y.strides, (4,))
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 4)
    def test_export_simple_nd(self):
        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
        y = memoryview(x)
        assert_equal(y.format, 'd')
        assert_equal(y.shape, (2, 2))
        assert_equal(y.ndim, 2)
        assert_equal(y.strides, (16, 8))
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 8)
    def test_export_discontiguous(self):
        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
        y = memoryview(x)
        assert_equal(y.format, 'f')
        assert_equal(y.shape, (3, 3))
        assert_equal(y.ndim, 2)
        assert_equal(y.strides, (36, 4))
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 4)
    def test_export_record(self):
        # structured dtype exported as a PEP 3118 'T{...}' format string
        dt = [('a', 'b'),
              ('b', 'h'),
              ('c', 'i'),
              ('d', 'l'),
              ('dx', 'q'),
              ('e', 'B'),
              ('f', 'H'),
              ('g', 'I'),
              ('h', 'L'),
              ('hx', 'Q'),
              ('i', np.single),
              ('j', np.double),
              ('k', np.longdouble),
              ('ix', np.csingle),
              ('jx', np.cdouble),
              ('kx', np.clongdouble),
              ('l', 'S4'),
              ('m', 'U4'),
              ('n', 'V3'),
              ('o', '?'),
              ('p', np.half),
              ]
        x = np.array(
                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                  asbytes('aaaa'), 'bbbb', asbytes('   '), True, 1.0)],
                dtype=dt)
        y = memoryview(x)
        assert_equal(y.shape, (1,))
        assert_equal(y.ndim, 1)
        assert_equal(y.suboffsets, EMPTY)
        sz = sum([dtype(b).itemsize for a, b in dt])
        # the 'l'/'L' codes depend on the platform's C long size
        if dtype('l').itemsize == 4:
            assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
        else:
            assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
        # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
        if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
            assert_equal(y.strides, (sz,))
        assert_equal(y.itemsize, sz)
    def test_export_subarray(self):
        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
        y = memoryview(x)
        assert_equal(y.format, 'T{(2,2)i:a:}')
        assert_equal(y.shape, EMPTY)
        assert_equal(y.ndim, 0)
        assert_equal(y.strides, EMPTY)
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 16)
    def test_export_endian(self):
        # non-native byte order must be spelled out in the format string
        x = np.array([1, 2, 3], dtype='>i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, '>i')
        else:
            assert_equal(y.format, 'i')
        x = np.array([1, 2, 3], dtype='<i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, 'i')
        else:
            assert_equal(y.format, '<i')
    def test_export_flags(self):
        # Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
        assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
    def test_padding(self):
        # fields at increasing offsets introduce internal padding
        for j in range(8):
            x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
            self._check_roundtrip(x)
    def test_reference_leak(self):
        # exporting/importing a buffer must not change the module refcount
        count_1 = sys.getrefcount(np.core._internal)
        a = np.zeros(4)
        b = memoryview(a)
        c = np.asarray(b)
        count_2 = sys.getrefcount(np.core._internal)
        assert_equal(count_1, count_2)
    def test_padded_struct_array(self):
        # aligned structured dtypes round-trip including their padding bytes
        dt1 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
        self._check_roundtrip(x1)
        dt2 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
                align=True)
        x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
        self._check_roundtrip(x2)
        dt3 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                 ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
        self._check_roundtrip(x3)
    def test_relaxed_strides(self):
        # Test that relaxed strides are converted to non-relaxed
        c = np.ones((1, 10, 10), dtype='i8')
        # Check for NPY_RELAXED_STRIDES_CHECKING:
        if np.ones((10, 1), order="C").flags.f_contiguous:
            c.strides = (-1, 80, 8)
        assert memoryview(c).strides == (800, 80, 8)
        # Writing C-contiguous data to a BytesIO buffer should work
        fd = io.BytesIO()
        fd.write(c.data)
        fortran = c.T
        assert memoryview(fortran).strides == (8, 80, 800)
        arr = np.ones((1, 10))
        if arr.flags.f_contiguous:
            shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
            assert_(strides[0] == 8)
            arr = np.ones((10, 1), order='F')
            shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
            assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
    """Deleting ndarray/flags attributes must raise AttributeError, not crash.

    Regression tests for ticket #2046, where attribute deletion segfaulted.
    """

    @staticmethod
    def _assert_undeletable(obj, names):
        # every listed attribute must refuse deletion with AttributeError
        for name in names:
            assert_raises(AttributeError, delattr, obj, name)

    def test_multiarray_writable_attributes_deletion(self):
        self._assert_undeletable(
            np.ones(2),
            ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'])

    def test_multiarray_not_writable_attributes_deletion(self):
        self._assert_undeletable(
            np.ones(2),
            ["ndim", "flags", "itemsize", "size", "nbytes", "base",
             "ctypes", "T", "__array_interface__", "__array_struct__",
             "__array_priority__", "__array_finalize__"])

    def test_multiarray_flags_writable_attribute_deletion(self):
        self._assert_undeletable(
            np.ones(2).flags,
            ['updateifcopy', 'aligned', 'writeable'])

    def test_multiarray_flags_not_writable_attribute_deletion(self):
        self._assert_undeletable(
            np.ones(2).flags,
            ["contiguous", "c_contiguous", "f_contiguous", "fortran",
             "owndata", "fnc", "forc", "behaved", "carray", "farray",
             "num"])
def test_array_interface():
    """Scalar coercion and shape handling through __array_interface__."""
    class Wrapper(object):
        # minimal object exposing a mutable __array_interface__ dict
        def __init__(self, value):
            self.value = value
            self.iface = {'typestr' : '=f8'}
        def __float__(self):
            return float(self.value)
        @property
        def __array_interface__(self):
            return self.iface

    w = Wrapper(0.5)
    # scalar coercion, alone and inside sequences
    assert_equal(np.array(w), 0.5)
    assert_equal(np.array([w]), [0.5])
    assert_equal(np.array([w, w]), [0.5, 0.5])
    assert_equal(np.array(w).dtype, np.dtype('=f8'))

    # explicit 0-d shape
    w.iface['shape'] = ()
    assert_equal(np.array(w), 0.5)
    # shape must be a tuple, not None
    w.iface['shape'] = None
    assert_raises(TypeError, np.array, w)
    # 1x1 shape wraps the scalar
    w.iface['shape'] = (1, 1)
    assert_equal(np.array(w), [[0.5]])
    # a shape the scalar data cannot fill is an error
    w.iface['shape'] = (2,)
    assert_raises(ValueError, np.array, w)

    # scalar interface borrowed verbatim from a 0-d array
    class ArrayLike(object):
        array = np.array(1)
        __array_interface__ = array.__array_interface__
    assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
    """Deleting from a flat iterator must raise TypeError, never crash."""
    flat = np.ones(3).flat
    try:
        del flat[1]
        del flat[1:2]
    except TypeError:
        pass  # expected: flatiter does not support item deletion
    except:
        # anything else (including a crash) is a regression
        raise AssertionError
def test_scalar_element_deletion():
    """Fields of a structured (void) scalar cannot be deleted."""
    rec = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
    assert_raises(ValueError, rec[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
    """Drives the PyDataMem_SetEventHook checks implemented in C."""
    def test_mem_seteventhook(self):
        # The actual tests are within the C code in
        # multiarray/multiarray_tests.c.src
        test_pydatamem_seteventhook_start()
        # force an allocation and free of a numpy array
        # needs to be larger than limit of small memory cacher in ctors.c
        a = np.zeros(1000)
        del a
        test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
    """Fancy-index in-place increment via the C-level map iterator helper."""
    def test_mapiter(self):
        # The actual tests are within the C code in
        # multiarray/multiarray_tests.c.src
        a = arange(12).reshape((3, 4)).astype(float)
        index = ([1, 1, 2, 0],
                 [0, 0, 2, 3])
        vals = [50, 50, 30, 16]
        # the duplicated index (1, 0) must accumulate: 4 + 50 + 50 = 104
        test_inplace_increment(a, index, vals)
        assert_equal(a, [[ 0., 1., 2., 19.,],
                         [ 104., 5., 6., 7.,],
                         [ 8., 9., 40., 11.,]])
        b = arange(6).astype(float)
        index = (array([1, 2, 0]),)
        vals = [50, 4, 100.1]
        test_inplace_increment(b, index, vals)
        assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. ])
class TestAsCArray(TestCase):
    """Element access through PyArray_AsCArray (C helper test_as_c_array)."""
    def test_1darray(self):
        array = np.arange(24, dtype=np.double)
        from_c = test_as_c_array(array, 3)
        assert_equal(array[3], from_c)
    def test_2darray(self):
        array = np.arange(24, dtype=np.double).reshape(3, 8)
        from_c = test_as_c_array(array, 2, 4)
        assert_equal(array[2, 4], from_c)
    def test_3darray(self):
        array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
        from_c = test_as_c_array(array, 1, 2, 3)
        assert_equal(array[1, 2, 3], from_c)
class PriorityNdarray():
    """Wrapper whose high __array_priority__ makes it win comparison dispatch.

    Rich comparisons unwrap a PriorityNdarray operand, compare the payloads,
    and re-wrap the result.
    """
    __array_priority__ = 1000

    def __init__(self, array):
        self.array = array

    @staticmethod
    def _unwrap(other):
        # compare against the payload when the operand is also wrapped
        return other.array if isinstance(other, PriorityNdarray) else other

    def __lt__(self, other):
        return PriorityNdarray(self.array < self._unwrap(other))

    def __gt__(self, other):
        return PriorityNdarray(self.array > self._unwrap(other))

    def __le__(self, other):
        return PriorityNdarray(self.array <= self._unwrap(other))

    def __ge__(self, other):
        return PriorityNdarray(self.array >= self._unwrap(other))

    def __eq__(self, other):
        return PriorityNdarray(self.array == self._unwrap(other))

    def __ne__(self, other):
        return PriorityNdarray(self.array != self._unwrap(other))
class TestArrayPriority(TestCase):
    """__array_priority__ must route rich comparisons to PriorityNdarray.

    Each test builds a plain ndarray pair and a wrapped pair, applies one
    comparison in all four operand combinations, and checks that any
    combination involving a PriorityNdarray yields a PriorityNdarray whose
    payload matches the plain element-wise result.

    NOTE(review): `dtype` below is resolved from module scope, not defined
    in this class -- presumably numpy's dtype brought in by a star import;
    confirm the intended element type before modifying these tests.
    """
    def test_lt(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l < r
        res2 = l < rp  # ndarray defers; the wrapper's reflected op handles it
        res3 = lp < r
        res4 = lp < rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
    # same pattern as test_lt, for each remaining rich comparison
    def test_gt(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l > r
        res2 = l > rp
        res3 = lp > r
        res4 = lp > rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
    def test_le(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l <= r
        res2 = l <= rp
        res3 = lp <= r
        res4 = lp <= rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
    def test_ge(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l >= r
        res2 = l >= rp
        res3 = lp >= r
        res4 = lp >= rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
    def test_eq(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l == r
        res2 = l == rp
        res3 = lp == r
        res4 = lp == rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
    def test_ne(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l != r
        res2 = l != rp
        res3 = lp != r
        res4 = lp != rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
class TestConversion(TestCase):
    def test_array_scalar_relational_operation(self):
        """Relational ops between Python scalars and 0-d arrays (and
        between 0-d arrays of different dtypes) must follow numeric
        order -- in particular -1 may never wrap around to a large
        unsigned value."""
        # All integer
        for dt1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
            for dt2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
        # Unsigned integers: the Python scalar -1 must compare smaller
        # than (and unequal to) 1 stored in any unsigned dtype.
        for dt1 in 'BHILQP':
            assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            # unsigned vs signed
            for dt2 in 'bhilqp':
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
        # Signed integers and floats
        # NOTE(review): 'bhlqp' omits the 'i' (C int) typecode that the
        # signed set above includes -- confirm whether intentional.
        for dt1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            for dt2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
    """Tests for np.where(condition, x, y) selection semantics."""
    def test_basic(self):
        """Scalar/array selection and strided views over many dtypes."""
        dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
               np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=np.bool)
            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            # r is d with position 7 (where c is False) taken from e.
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            # Broadcasting a scalar element on either side.
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            # Non-contiguous (positive and negative stride) views.
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
    def test_exotic(self):
        """Object dtype, zero-sized operands, and result-dtype rules."""
        # object
        assert_array_equal(np.where(True, None, None), np.array(None))
        # zero sized
        m = np.array([], dtype=bool).reshape(0, 3)
        b = np.array([], dtype=np.float64).reshape(0, 3)
        assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
        # object cast
        d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
                      0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
                      1.267, 0.229, -1.39, 0.487])
        nan = float('NaN')
        e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
                      'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
                     dtype=object);
        m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
        # NOTE: e[:] is a *view* (basic slicing), so the fancy-indexed
        # assignments below also mutate e in place.
        r = e[:]
        r[np.where(m)] = d[np.where(m)]
        assert_array_equal(np.where(m, d, e), r)
        r = e[:]
        r[np.where(~m)] = d[np.where(~m)]
        assert_array_equal(np.where(m, e, d), r)
        assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g required by pandas)
        d = np.array([1., 2.], dtype=np.float32)
        e = float('NaN')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('-Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        # also check upcast
        e = float(1e150)
        assert_equal(np.where(True, d, e).dtype, np.float64)
    def test_ndim(self):
        """Condition broadcasting along either axis of 2-d operands."""
        c = [True, False]
        a = np.zeros((2, 25))
        b = np.ones((2, 25))
        r = np.where(np.array(c)[:,np.newaxis], a, b)
        assert_array_equal(r[0], a[0])
        assert_array_equal(r[1], b[0])
        # Transposed: the 1-d condition now broadcasts over rows.
        a = a.T
        b = b.T
        r = np.where(c, a, b)
        assert_array_equal(r[:,0], a[:,0])
        assert_array_equal(r[:,1], b[:,0])
    def test_dtype_mix(self):
        """Mixed scalar/array dtypes and non-boolean condition arrays."""
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        a = np.uint32(1)
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)
        a = a.astype(np.float32)
        b = b.astype(np.int64)
        assert_equal(np.where(c, a, b), r)
        # non bool mask: any nonzero value counts as True
        c = c.astype(np.int)
        c[c != 0] = 34242324
        assert_equal(np.where(c, a, b), r)
        # invert
        tmpmask = c != 0
        c[c == 0] = 41247212
        c[tmpmask] = 0
        assert_equal(np.where(c, b, a), r)
    def test_foreign(self):
        """Byte-swapped (non-native endian) operands and condition."""
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        a = np.ones(1, dtype='>i4')
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)
        b = b.astype('>f8')
        assert_equal(np.where(c, a, b), r)
        a = a.astype('<i4')
        assert_equal(np.where(c, a, b), r)
        c = c.astype('>i4')
        assert_equal(np.where(c, a, b), r)
    def test_error(self):
        """Shape-mismatched operands must raise ValueError."""
        c = [True, True]
        a = np.ones((4, 5))
        b = np.ones((5, 5))
        assert_raises(ValueError, np.where, c, a, a)
        assert_raises(ValueError, np.where, c[0], a, b)
    def test_string(self):
        """Shorter strings selected into a wider result are NUL-padded."""
        # gh-4778 check strings are properly filled with nulls
        a = np.array("abc")
        b = np.array("x" * 753)
        assert_equal(np.where(True, a, b), "abc")
        assert_equal(np.where(False, b, a), "abc")
        # check native datatype sized strings
        a = np.array("abcd")
        b = np.array("x" * 8)
        assert_equal(np.where(True, a, b), "abcd")
        assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
    """``sys.getsizeof`` must account for the data buffer an array owns."""

    def test_empty_array(self):
        # Even a zero-length array has a positive object size.
        assert_(sys.getsizeof(np.array([])) > 0)

    def check_array(self, dtype):
        # The reported size must exceed the raw payload for any length.
        itemsize = dtype(0).itemsize
        for count in [10, 50, 100, 500]:
            arr = np.arange(count, dtype=dtype)
            assert_(sys.getsizeof(arr) > count * itemsize)

    def test_array_int32(self):
        self.check_array(np.int32)

    def test_array_int64(self):
        self.check_array(np.int64)

    def test_array_float32(self):
        self.check_array(np.float32)

    def test_array_float64(self):
        self.check_array(np.float64)

    def test_view(self):
        # A view does not own its buffer, so it reports a smaller size.
        base = np.ones(100)
        assert_(sys.getsizeof(base[...]) < sys.getsizeof(base))

    def test_reshape(self):
        # A reshaped copy owns its own buffer and reports the larger size.
        base = np.ones(100)
        copied = base.reshape(100, 1, 1).copy()
        assert_(sys.getsizeof(base) < sys.getsizeof(copied))

    def test_resize(self):
        # In-place resize changes the owned allocation accordingly.
        arr = np.ones(100)
        before = sys.getsizeof(arr)
        arr.resize(50)
        assert_(before > sys.getsizeof(arr))
        arr.resize(150)
        assert_(before < sys.getsizeof(arr))

    def test_error(self):
        # __sizeof__ takes no arguments.
        arr = np.ones(100)
        assert_raises(TypeError, arr.__sizeof__, "a")
class TestHashing(TestCase):
    """Arrays are mutable and therefore must not register as hashable."""

    def test_collections_hashable(self):
        empty = np.array([])
        self.assertFalse(isinstance(empty, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
    def test_view_safety(self):
        """Exercise ``_view_is_safe``: a view may never expose, split, or
        overlay object pointers ('O') or opaque "missing" regions."""
        # Size of a pointer-sized field; 'O' fields occupy the same space.
        psize = dtype('p').itemsize
        # creates dtype but with extra character code - for missing 'p' fields
        def mtype(s):
            n, offset, fields = 0, 0, []
            for c in s.split(','): #subarrays won't work
                if c != '-':
                    fields.append(('f{0}'.format(n), c, offset))
                    n += 1
                # '-' consumes pointer-sized space but declares no field.
                offset += dtype(c).itemsize if c != '-' else psize
            names, formats, offsets = zip(*fields)
            return dtype({'names': names, 'formats': formats,
                          'offsets': offsets, 'itemsize': offset})
        # test nonequal itemsizes with objects:
        # these should succeed:
        _view_is_safe(dtype('O,p,O,p'), dtype('O,p,O,p,O,p'))
        _view_is_safe(dtype('O,O'), dtype('O,O,O'))
        # these should fail:
        assert_raises(TypeError, _view_is_safe, dtype('O,O,p'), dtype('O,O'))
        assert_raises(TypeError, _view_is_safe, dtype('O,O,p'), dtype('O,p'))
        assert_raises(TypeError, _view_is_safe, dtype('O,O,p'), dtype('p,O'))
        # test nonequal itemsizes with missing fields:
        # these should succeed:
        _view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
        _view_is_safe(dtype('p,p'), dtype('p,p,p'))
        # these should fail:
        assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
        assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
        assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
        # scans through positions at which we can view a type
        def scanView(d1, otype):
            # Returns every byte offset at which a single 'otype' field
            # overlaid on d1 is accepted by _view_is_safe.
            goodpos = []
            for shift in range(d1.itemsize - dtype(otype).itemsize+1):
                d2 = dtype({'names': ['f0'], 'formats': [otype],
                            'offsets': [shift], 'itemsize': d1.itemsize})
                try:
                    _view_is_safe(d1, d2)
                except TypeError:
                    pass
                else:
                    goodpos.append(shift)
            return goodpos
        # test partial overlap with object field
        assert_equal(scanView(dtype('p,O,p,p,O,O'), 'p'),
                     [0] + list(range(2*psize, 3*psize+1)))
        assert_equal(scanView(dtype('p,O,p,p,O,O'), 'O'),
                     [psize, 4*psize, 5*psize])
        # test partial overlap with missing field
        assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
                     [0] + list(range(2*psize, 3*psize+1)))
        # test nested structures with objects:
        nestedO = dtype([('f0', 'p'), ('f1', 'p,O,p')])
        assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
        assert_equal(scanView(nestedO, 'O'), [2*psize])
        # test nested structures with missing fields:
        nestedM = dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
        assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
        # test subarrays with objects
        subarrayO = dtype('p,(2,3)O,p')
        assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
        assert_equal(scanView(subarrayO, 'O'),
                     list(range(psize, 6*psize+1, psize)))
        #test dtype with overlapping fields
        overlapped = dtype({'names': ['f0', 'f1', 'f2', 'f3'],
                            'formats': ['p', 'p', 'p', 'p'],
                            'offsets': [0, 1, 3*psize-1, 3*psize],
                            'itemsize': 4*psize})
        assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
    # This will go away when __array_priority__ is settled, meanwhile
    # it serves to check unintended changes.
    op = operator
    # NOTE(review): op.mod appears twice in this list -- confirm whether
    # the duplicate is intentional before removing it.
    binary_ops = [
        op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
        op.ge, op.lt, op.le, op.ne, op.eq
        ]
    # Python 2 also exposes the classic division operator.
    if sys.version_info[0] < 3:
        binary_ops.append(op.div)
    class Foo(np.ndarray):
        # Above plain ndarray (0.0) but below Bar.
        __array_priority__ = 100.
        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)
    class Bar(np.ndarray):
        __array_priority__ = 101.
        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)
    class Other(object):
        # Non-ndarray with the highest priority: it must always win.
        __array_priority__ = 1000.
        # Every binary/reflected op returns a fresh Other instance.
        def _all(self, other):
            return self.__class__()
        __add__ = __radd__ = _all
        __sub__ = __rsub__ = _all
        __mul__ = __rmul__ = _all
        __pow__ = __rpow__ = _all
        __div__ = __rdiv__ = _all
        __mod__ = __rmod__ = _all
        __truediv__ = __rtruediv__ = _all
        __floordiv__ = __rfloordiv__ = _all
        __and__ = __rand__ = _all
        __xor__ = __rxor__ = _all
        __or__ = __ror__ = _all
        __lshift__ = __rlshift__ = _all
        __rshift__ = __rrshift__ = _all
        __eq__ = _all
        __ne__ = _all
        __gt__ = _all
        __ge__ = _all
        __lt__ = _all
        __le__ = _all
    def test_ndarray_subclass(self):
        # The higher-priority subclass wins regardless of operand order.
        a = np.array([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)
    def test_ndarray_other(self):
        a = np.array([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)
    def test_subclass_subclass(self):
        a = self.Foo([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)
    def test_subclass_other(self):
        a = self.Foo([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
    """Truth value of single-element byte-string arrays.

    An array whose sole element is empty, or consists only of spaces
    and NUL bytes, must be falsey; a non-space character after a NUL
    makes it truthy.
    """

    def test_empty_bstring_array_is_falsey(self):
        self.assertFalse(np.array([''], dtype=np.str))

    def test_whitespace_bstring_array_is_falsey(self):
        arr = np.array(['spam'], dtype=np.str)
        arr[0] = '  \0\0'
        self.assertFalse(arr)

    def test_all_null_bstring_array_is_falsey(self):
        arr = np.array(['spam'], dtype=np.str)
        arr[0] = '\0\0\0\0'
        self.assertFalse(arr)

    def test_null_inside_bstring_array_is_truthy(self):
        arr = np.array(['spam'], dtype=np.str)
        arr[0] = ' \0 \0'
        self.assertTrue(arr)
class TestUnicodeArrayNonzero(TestCase):
    """Truth value of single-element unicode arrays.

    Mirrors TestBytestringArrayNonzero for the unicode dtype: empty or
    space/NUL-only content is falsey, anything else truthy.
    """

    def test_empty_ustring_array_is_falsey(self):
        self.assertFalse(np.array([''], dtype=np.unicode))

    def test_whitespace_ustring_array_is_falsey(self):
        arr = np.array(['eggs'], dtype=np.unicode)
        arr[0] = '  \0\0'
        self.assertFalse(arr)

    def test_all_null_ustring_array_is_falsey(self):
        arr = np.array(['eggs'], dtype=np.unicode)
        arr[0] = '\0\0\0\0'
        self.assertFalse(arr)

    def test_null_inside_ustring_array_is_truthy(self):
        arr = np.array(['eggs'], dtype=np.unicode)
        arr[0] = ' \0 \0'
        self.assertTrue(arr)
# Run the whole test module when executed directly as a script.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
cainiaocome/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Shade ``axis`` with the classifier's decision surface and overlay
    the training points, sized proportionally to ``sample_weight``.

    Relies on the module-level training data ``X`` and labels ``Y``.
    """
    # plot the decision function on a dense grid over [-4, 5] x [-4, 5]
    grid_x, grid_y = np.meshgrid(np.linspace(-4, 5, 500),
                                 np.linspace(-4, 5, 500))
    scores = classifier.decision_function(np.c_[grid_x.ravel(),
                                                grid_y.ravel()])
    scores = scores.reshape(grid_x.shape)
    # plot the line, the points, and the nearest vectors to the plane
    axis.contourf(grid_x, grid_y, scores, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points: two 10-point Gaussian blobs, class +1 shifted to
# (1, 1) and class -1 centered at the origin
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
# random per-sample weights for the "modified" plot, constant ones for
# the reference plot
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without sample weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
# side-by-side comparison of the two decision boundaries
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
| bsd-3-clause |
florian-f/sklearn | examples/linear_model/plot_sgd_separating_hyperplane.py | 12 | 1201 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
# (decision function evaluated on a coarse 10x10 grid)
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
    x1 = val
    x2 = X2[i, j]
    # NOTE(review): a 1-D sample works with this era of scikit-learn;
    # newer releases require a 2-D input, i.e. [[x1, x2]].
    p = clf.decision_function([x1, x2])
    Z[i, j] = p[0]
# contours at the margins (-1, +1) and the decision boundary (0)
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
pl.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis('tight')
pl.show()
| bsd-3-clause |
spallavolu/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
                  n_features=2,
                  centers=4,
                  cluster_std=1,
                  center_box=(-10.0, 10.0),
                  shuffle=True,
                  random_state=1)  # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)
    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax1.set_xlim([-0.1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
    # Initialize the clusterer with n_clusters value and a random generator
    # seed of 10 for reproducibility.
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(X)
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)
    # Stack one filled silhouette profile per cluster, bottom to top.
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        # NOTE(review): cm.spectral was removed from modern matplotlib
        # releases (spelled cm.nipy_spectral there) -- confirm target
        # matplotlib version.
        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    # 2nd Plot showing the actual clusters formed
    colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)
    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)
    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")
    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')
    plt.show()
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardSoftShear/Area/Normal_Stress_Plot.py | 6 | 4510 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
plt.style.use('grayscale')
###############################################################
## Normal stress vs. normal strain for all analyzed contact areas
###############################################################
# The four sections below were identical except for the results
# directory and legend label, so they are folded into one helper and a
# loop.  Every case stores the analytical solution in the same
# feioutput layout: row 9 of Element_Outputs is the normal stress and
# row 6 the normal strain; both are sign-flipped so that compression
# plots as positive, and stress is converted from Pa to kPa.
def _plot_normal_stress_case(case_dir, case_label):
    """Read one case's feioutput and add its stress-strain curve.

    case_dir   -- directory holding Analytical_Solution_Normal_Stress.feioutput
    case_label -- legend label (raw LaTeX string) for the curve
    """
    thefile = case_dir + "/Analytical_Solution_Normal_Stress.feioutput"
    finput = h5py.File(thefile)
    normal_stress = -finput["/Model/Elements/Element_Outputs"][9, :]
    normal_strain = -finput["/Model/Elements/Element_Outputs"][6, :]
    # 'linewidth' must be lowercase: modern matplotlib rejects the
    # miscapitalized 'Linewidth' keyword used previously.
    plt.plot(normal_strain, normal_stress / 1000, label=case_label,
             linewidth=4, markersize=20)

plt.figure(figsize=(12, 10))
# (results directory, legend label) for each contact area.
_CASES = [
    ("A_1", r'Area = $1 m^2$'),
    ("A_1e2", r'Area = $1e^2 m^2$'),
    ("A_1e-2", r'Area = $1e^{-2} m^2$'),
    ("A_1e-4", r'Area = $1e^{-4} m^2$'),
]
for _case_dir, _case_label in _CASES:
    _plot_normal_stress_case(_case_dir, _case_label)
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
SCECcode/BBP | bbp/comps/bbp_status.py | 1 | 9070 | #!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Module BBP Status (adapted from CSEP Status)
"""
from __future__ import division, print_function
# Import Python modules first
import os
import sys
import time
# Import Broadband modules now
from install_cfg import InstallCfg
import bband_utils
class BBPStatus(object):
"""
This class is designed to acquire the current BBP system and
software status
"""
# Static data members
# Name of the file with system status information
system_type = "system_status"
# Name of the file with software status information
software_type = "software_status"
# List of commands that used to capture external software version
# and flag if command output is on stderr (True). If command
# output is redirected to the stderr, the flag should be set to
# 'True' we do not trigger it as a failure
__all_packages = [["GCC Version", "cat $BBP_DIR/src/gcc.version", False],
["GFORTRAN Version",
"cat $BBP_DIR/src/gfortran.version", False],
["NumPy Version",
"python -c 'import numpy; print numpy.__version__;'",
False],
["SciPy Version",
"python -c 'import scipy; print scipy.__version__;'",
False],
["Matplotlib Version",
"python -c 'import matplotlib; print matplotlib.__version__;'",
False],
["PyProj Version",
"python -c 'import pyproj; print pyproj.__version__;'",
False]]
# Leave some envrionment variables out, to avoid capturing too much
# personal information (and others that capture too much useless
# stuff!)
__exclude_env = ["LS_COLORS",
"SSH_CONNECTION",
"SSH_TTY",
"SSH_CLIENT",
"SSH_ASKPASS",
"MAIL"]
#--------------------------------------------------------------------
#
# Initialization
#
# Input:
# options - Command-line options including defaults ones used
# by caller program. Default is None.
#
def __init__(self, sim_id=0, options=None):
"""
Initialization for BBPStatus class
"""
self.sim_id = sim_id
self.__options = options
self.install = InstallCfg.getInstance()
self.outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(sim_id))
#--------------------------------------------------------------------
#
# Get names of the files for system status
#
# Input: None
#
# Output: A tuple of data and corresponding metadata filenames
#
def system_filename(self):
"""
Get the name of the system status file
"""
filename = os.path.join(self.outdir,
"%s-%d.txt" % (self.system_type, self.sim_id))
return filename
#--------------------------------------------------------------------
#
# Get names of the files for system status
#
# Input: None
#
# Output: A list of data filename and corresponding metadata filename
#
def software_filename(self):
"""
Get the name of the software status file
"""
filename = os.path.join(self.outdir,
"%s-%d.txt" % (self.software_type, self.sim_id))
return filename
#--------------------------------------------------------------------
#
# Capture status of the system
#
# Input: datafile - Name of the file to capture status information to
# Default is None
#
# Output: None
#
def system(self, datafile=None):
"""
Capture system status to the file
"""
if datafile is None:
datafile = self.system_filename()
# Create data file
try:
fhandle = open(datafile, 'a')
except IOError:
print("Unable to open output data file: %s!" % (datafile))
sys.exit(1)
fhandle.write("\n%s\n" % ('='*80))
fhandle.write("%s" %
(time.strftime("%a %d %b %Y %X %Z", time.localtime())))
fhandle.write("\n%s\n\n" % ('='*80))
# Store host info
fhandle.write("%s: %s\n\n" % ("uname", os.uname()))
# Store user info
command = "id"
fhandle.write("%s: %s\n" %
(command,
bband_utils.get_command_output(command,
abort_on_error=True)))
# Store executable and command-line options
fhandle.write("%s: %s\n\n" % ("argv", sys.argv))
# Store executable and command-line options including the default ones
if self.__options is not None:
fhandle.write("%s: %s\n\n" % ("command-line options", self.__options))
# Store environment variables
fhandle.write("Environment Variables\n")
for item in os.environ:
if item in BBPStatus.__exclude_env:
continue
fhandle.write("%s = %s\n" % (item, os.environ[item]))
# Store shell resource information
fhandle.write("\nResource Information (ulimit -a)\n")
command = "ulimit -a"
fhandle.write("%s\n" %
(bband_utils.get_command_output(command,
abort_on_error=True)))
# Close the file
fhandle.close()
#--------------------------------------------------------------------
#
# Capture status of the software used by the system
#
# Input: program_name - Name of the calling program
# program_version - Version of the calling program
# datafile - Names of the file and metadata file to capture status
# information to. Default is None
#
# Output: None
#
    def software(self, datafile=None):
        """
        Capture software status to the file.

        Appends a timestamped report to datafile (defaults to
        self.software_filename()) with the Broadband version, the Python
        version, and a version line for each entry in __all_packages.
        Exits the process on I/O error.
        """
        # Make sure we have the data filename
        if datafile is None:
            datafile = self.software_filename()
        # Create data file (append mode)
        try:
            fhandle = open(datafile, 'a')
        except IOError:
            print("Unable to open output data file: %s!" % (datafile))
            sys.exit(1)
        # Banner with the current local time
        fhandle.write("\n%s\n" % ('='*80))
        fhandle.write("%s" %
                      (time.strftime("%a %d %b %Y %X %Z", time.localtime())))
        fhandle.write("\n%s\n\n" % ('='*80))
        # Store version of calling program
        fhandle.write("Broadband version: %s\n" % (self.install.VERSION))
        # Store python version
        fhandle.write("%s: %s\n\n" % ("Python version", sys.version))
        # Each package entry is (name, version-command, output_on_stderr);
        # when the third field is not a bool it IS the version string itself.
        for package in BBPStatus.__all_packages:
            name = package[0]
            command = package[1]
            output_on_stderr = package[2]
            if isinstance(output_on_stderr, bool) is True:
                # Version command is provided
                msg = bband_utils.get_command_output(command,
                                                     output_on_stderr,
                                                     abort_on_error=True)
                fhandle.write("%s: %s\n" % (name, msg))
            else:
                # Version string is provided in 'output_on_stderr'
                # local variable
                fhandle.write("%s: %s\n" % (name, output_on_stderr))
        # Close the file
        fhandle.close()
#--------------------------------------------------------------------
#
# Get user name of the running process
#
# Input: None
#
# Output: A username
#
def user_name():
"""
Get the user name of the running process
"""
name = bband_utils.get_command_output("whoami",
abort_on_error=True)
# Strip newline if any
return name.replace("\n", "")
user_name = staticmethod(user_name)
# Invoke the module: when run as a script, write both status reports
if __name__ == '__main__':
    STATUS = BBPStatus()
    # System status
    FILENAME = STATUS.system_filename()
    STATUS.system(FILENAME)
    # Software status
    FILENAME = STATUS.software_filename()
    STATUS.software(FILENAME)
# end of main
| apache-2.0 |
faraz117/Traffic-Congestion-Estimator | EdgeDetection2.py | 1 | 16913 |
from __future__ import division
import cv2
import numpy as np
import time
import os
import imutils
from sklearn.externals import joblib
from scipy.cluster.vq import *
# initialize the current frame of the video, along with the list of
# ROI points along with whether or not this is input mode
roiPts = []
inputMode = False
image= None ;
clf, classes_names, stdSlr, k, voc = joblib.load("bof.pkl")
sift = cv2.xfeatures2d.SIFT_create()
# Adaptive Threshold Based on Histogram or color of the image or do something!
def checkifonLine(pt, pt1, pt2, window):  # this function computes the existence of centroid on the trigger line
    """Return True if point *pt* lies within *window* pixels of the line
    through *pt1* and *pt2*, sampled at integer x positions 0..pt2[0]-1.

    Used to detect when a contour centroid crosses the trigger line.
    """
    # Bug fix: a vertical line (pt1[0] == pt2[0]) used to raise
    # ZeroDivisionError; there is nothing to sample, so report False.
    if pt2[0] == pt1[0]:
        return False
    slope = (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])
    # Walk integer x positions along the line and test whether the point
    # falls inside a (2*window)-wide box centred on the line at that x.
    for num in range(pt2[0]):
        if (pt[0] in range(num - window, num + window) and
                pt[1] in range(int(num * slope) - window, int(num * slope) + window)):
            return True
    # Bug fix: when range(pt2[0]) was empty the original raised
    # UnboundLocalError on 'go'; now we simply report no hit.
    return False
def non(x):
    """Do-nothing callback placeholder (e.g. for cv2.createTrackbar)."""
    pass
def mse(imageA, imageB):
    """Mean squared pixel error between two images.

    Both images are first resized to a common 80x80 so they are
    comparable; the lower the error, the more "similar" they are.
    """
    resized_a = cv2.resize(imageA, (80, 80), interpolation=cv2.INTER_LINEAR)
    resized_b = cv2.resize(imageB, (80, 80), interpolation=cv2.INTER_LINEAR)
    diff = resized_a.astype("float") - resized_b.astype("float")
    # Normalise by height * width only (not by channel count), exactly as
    # the original metric did.
    return np.sum(diff ** 2) / float(resized_a.shape[0] * resized_a.shape[1])
def fetchClass(imagetoClassify):
    """Classify a cropped vehicle image with the pre-trained bag-of-features
    pipeline (SIFT descriptors -> visual-word histogram -> scaler -> SVM).

    Returns a one-element list containing the predicted class name.
    Uses module globals: sift, voc, k, stdSlr, clf, classes_names.
    """
    kpts, des = sift.detectAndCompute(imagetoClassify, None)
    # Build the visual-word occurrence histogram for this image
    test_features = np.zeros((1, k), "float32")
    words, distance = vq(des, voc)
    for w in words:
        test_features[0][w] += 1
    #nbr_occurences = np.sum( (test_features > 0) * 1, axis = 0)
    #idf = np.array(np.log((1.0+1) / (1.0*nbr_occurences + 1)), 'float32')
    # Scale with the same scaler fitted at training time
    test_features = stdSlr.transform(test_features)
    predictions = [classes_names[i] for i in clf.predict(test_features)]
    return predictions
def selectROI(event, x, y, flags, param):
    """Mouse callback: record up to four ROI corner clicks while in input mode."""
    # grab the reference to the current frame, list of ROI
    # points and whether or not it is ROI selection mode
    # NOTE(review): declares 'frame' global but actually draws on the global
    # 'image'; this works because 'image' is only mutated, never rebound
    # here -- confirm intent.
    global frame, roiPts, inputMode

    # if we are in ROI selection mode, the mouse was clicked,
    # and we do not already have four points, then update the
    # list of ROI points with the (x, y) location of the click
    # and draw the circle
    if inputMode and event == cv2.EVENT_LBUTTONDOWN and len(roiPts) < 4:
        roiPts.append((x, y))
        cv2.circle(image, (x, y), 4, (0, 255, 0), 2)
        cv2.imshow("frame", image)
def main():
count=0
count_LTV=0
count_HGV=0
count_MTV=0
carSize=500
carType=0;
stateHGV=False
prevObject=None
objectDetected=None
fgbg = cv2.createBackgroundSubtractorMOG2(500,168,1)
timerNow=0
cv2.namedWindow("frame")
cv2.setMouseCallback("frame", selectROI)
cap = cv2.VideoCapture('v5.avi')
roiBox = None
global image, roiPts, inputMode
f=open('shapeData.txt','w')
#cv2.namedWindow('trackBar',flags=cv2.WINDOW_NORMAL)
while True:
ret, image = cap.read()
key1 = cv2.waitKey(5) & 0xFF
if key1 == ord("t"):
cv2.imwrite("frame%d.jpg"%count,image)
count+=1
#image= image- image.mean()
if not ret:
break
if roiBox is not None :
roi = image[tl[1]:br[1], tl[0]:br[0]]
#pt1=cv2.getTrackbarPos('pts1','trackBar')
#pt2=cv2.getTrackbarPos('pts2','trackBar')
#pt3=cv2.getTrackbarPos('pts3','trackBar')
#pt4=cv2.getTrackbarPos('pts4','trackBar')
#pt5=cv2.getTrackbarPos('pts5','trackBar')
#pt6=cv2.getTrackbarPos('pts6','trackBar')
#rows,cols,ch = roi.shape
#pts1 = np.float32([[tl[0],tl[1]],[br[0],tl[0]],[br[0],br[1] ]])
#pts2 = np.float32([[pt1,pt2],[pt3,pt4],[pt5,pt6]])
#M = cv2.getAffineTransform(pts1,pts2)
#des = cv2.warpAffine(roi,M,(cols,rows))
#cv2.imshow('picture',des)
fgmask = fgbg.apply(roi)
ret, ShadowRemoved = cv2.threshold(fgmask,70,255,cv2.THRESH_BINARY)
cv2.imshow('shadow',ShadowRemoved)
#initialize the kernel for dilation and erosion
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(8,8))#filtering mask
img = cv2.medianBlur(ShadowRemoved,3)
blur = cv2.GaussianBlur(img,(3,3),0)
#thresholding
ret, th = cv2.threshold(img,63,255,cv2.THRESH_BINARY)
ret, th2 = cv2.threshold(blur,64,255,cv2.THRESH_BINARY)
closing = cv2.morphologyEx(th2, cv2.MORPH_CLOSE, kernel)
#filter closing
filteredClose = cv2.medianBlur(closing,1)
#get the masked data.
masked_data = cv2.bitwise_and(roi, roi, mask=filteredClose)
masked_data2 = cv2.bitwise_and(roi, roi, mask=filteredClose)
#apply the mask and get the new frame
maskApplied = cv2.add(roi,masked_data)
#get contours to track the object
gray = cv2.cvtColor(masked_data2,cv2.COLOR_BGR2GRAY)
ret,thres = cv2.threshold(gray,40,255,cv2.THRESH_BINARY)
edges = cv2.Canny(ShadowRemoved.copy(),100,200)
#cv2.imshow('edgy',edges)
#detector = cv2.FastFeatureDetector_create()
# Detect blobs.
#keypoints = detector.detect(ShadowRemoved)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
#im_with_keypoints = cv2.drawKeypoints(ShadowRemoved, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#cv2.imshow('ShadowRemoved',im_with_keypoints)
uncleanimage,contours, hierarchy = cv2.findContours(thres,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(maskApplied, contours, -1, (0,255,0), 3)
#cv2.imshow('threshold',thres)
cv2.putText(maskApplied,'HTV:',(100,50),0,1,(0,0,0,255),3)
cv2.putText(maskApplied,str(count_HGV),(200,50),0,1,(0,0,0,255),3)
cv2.putText(maskApplied,'LTV:',(100,100),0,1,(0,0,0,255),3)
cv2.putText(maskApplied,str(count_LTV),(200,100),0,1,(0,0,0,255),3)
cv2.putText(maskApplied,'MTV:',(100,150),0,1,(0,0,0,255),3)
cv2.putText(maskApplied,str(count_MTV),(200,150),0,1,(0,0,0,255),3)
maskedWidth,maskedHeight,maskedChannel=maskApplied.shape
if time.time() - timerNow > 0.7:
stateHGV=False
for con in contours:
rect = cv2.minAreaRect(con) #I have used min Area rect for better result
width = rect[1][0]
height = rect[1][1] #centeroid
if(width<1000) and (height <1000) and (width >= 20) and (height >20):
#Box with rotation according to size and angle of contour
M = cv2.moments(con)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
centroid = (cx,cy)
cv2.circle(maskApplied,centroid,5,(0,255,0),5)
if checkifonLine(centroid,(0,0),(maskedHeight-30,maskedWidth-30),4):
cv2.line(maskApplied,(20,20),(maskedHeight,maskedWidth),(0,0,255),2)
area=cv2.contourArea(con)
x,y,w,h = cv2.boundingRect(con)
cv2.rectangle(maskApplied,(x,y),(x+w,y+h),(0,255,0),2)
objectDetected=roi[y:y+h,x:x+w]
intwidth=int(width)
intheight=int(height)
if area > carSize*16 and area < carSize*200 :
if intheight in range(intwidth-30,intwidth+30):
cv2.putText(maskApplied,'MTV Detected',(x+w-10,y+h-70),0,.8,(0,255,0),2)
cv2.putText(maskApplied,str(height),(x+w-20,y+h-100),0,.8,(255,0,0),2)
cv2.putText(maskApplied,str(width),(x+w-20,y+h-120),0,.8,(255,0,0),2)
carType=2
else:
if not stateHGV:
cv2.putText(maskApplied,'HTV Detected',(x+w-10,y+h-70),0,.8,(0,255,0),2)
cv2.putText(maskApplied,str(height),(x+w-20,y+h-100),0,.8,(255,0,0),2)
cv2.putText(maskApplied,str(width),(x+w-20,y+h-120),0,.8,(255,0,0),2)
carType=1
if area > carSize and area < carSize*16:
cv2.putText(maskApplied,'LTV Detected',(x+w-10,y+h-70),0,.8,(0,255,0),2)
cv2.putText(maskApplied,str(height),(x+w-20,y+h-100),0,.8,(255,0,0),2)
cv2.putText(maskApplied,str(width),(x+w-20,y+h-120),0,.8,(255,0,0),2)
carType=-1
#if finalObject == None:
#finalObject=objectDetected
#if (finalObject is not None) and len(objectDetected) > len(finalObject):
#finalObject=objectDetected
cv2.line(maskApplied,(0,0),(maskedHeight-30,maskedWidth-30),(0,255,0),3)
#cv2.line(maskApplied,roiPts[2],roiPts[3],(0,0,255),3)
#cv2.imshow('edges', edges)
#cv2.imshow('backgroundSubtract',fgmask)
if objectDetected is not None:
if prevObject == None:
count+=1
prevObject=objectDetected
objectDetected = cv2.resize(objectDetected,(80, 80), interpolation = cv2.INTER_LINEAR)
carClass=fetchClass(objectDetected)
if carClass == ['light']:
print "light"
carType=-1
if carClass == ['heavy']:
print "heavy"
carType=1
if carClass == ['medium']:
print "medium"
carType=2
if carClass == ['other']:
print "other"
carType=0
if(carType==1):
stateHGV=True
timerNow=time.time()
count_HGV+=1
cv2.imwrite("./Heavy/object%d.jpg"%count,objectDetected)
if(carType==-1):
count_LTV+=1
cv2.imwrite("./Light/object%d.jpg"%count,objectDetected)
if(carType==2):
count_MTV+=1
cv2.imwrite("./Medium/object%d.jpg"%count,objectDetected)
elif mse(prevObject,objectDetected)> 3200:
count+=1
prevObject=objectDetected
objectDetected = cv2.resize(objectDetected,(80, 80), interpolation = cv2.INTER_LINEAR)
carClass=fetchClass(objectDetected)
if carClass == ['light']:
print "light"
carType=-1
if carClass == ['heavy']:
print "heavy"
carType=1
if carClass == ['medium']:
print "medium"
carType=2
if carClass == ['other']:
print "other"
carType==0
#cv2.imwrite("object%d.jpg"%count,objectDetected)
#f.write('object: %d,aspect_ratio: %d,solidity: %f,area: %d,extent: %f,circle_dia: %d\n'%(count,aspect_ratio,solidity,area,extent,equi_diameter))
if(carType==1):
f.write(' HGV\n')
stateHGV=True
timerNow=time.time()
count_HGV+=1
if not os.path.exists("./Heavy/"):
os.makedirs("./Heavy/")
cv2.imwrite("./Heavy/object%d.jpg"%count,objectDetected)
if(carType==-1):
f.write(' LTV\n')
count_LTV+=1
if not os.path.exists("./Light/"):
os.makedirs("./Light/")
cv2.imwrite("./Light/object%d.jpg"%count,objectDetected)
if(carType==2):
f.write(' MTV\n')
count_MTV+=1
if not os.path.exists("./Medium/"):
os.makedirs("./Medium/")
cv2.imwrite("./Medium/object%d.jpg"%count,objectDetected)
cv2.imshow("object",objectDetected)
cv2.imshow('MaskApplied',maskApplied)
image[tl[1]:br[1], tl[0]:br[0]]=roi
cv2.imshow("frame", image)
key = cv2.waitKey(1) & 0xFF
if key == ord("i") and len(roiPts) < 4:
# indicate that we are in input mode and clone the
# frame
inputMode = True
cv2.putText(image,'INPUT MODE !',(100,50),0,1,(0,0,255),3)
while len(roiPts) < 4:
cv2.imshow("frame", image)
cv2.waitKey(0)
# determine the top-left and bottom-right points
roiPts = np.array(roiPts)
s = roiPts.sum(axis = 1)
tl = roiPts[np.argmin(s)]
br = roiPts[np.argmax(s)]
# grab the ROI for the bounding box and convert it
# to the HSV color space
roiBox = (tl[0], tl[1], br[0], br[1])
# keep looping until 4 reference ROI points have
# been selected; press any key to exit ROI selction
# mode once 4 points have been selected
if cv2.waitKey(10)== 27:
break
cap.release()
cv2.destroyAllWindows()
# Run the detector only when executed as a script
if __name__ == "__main__":
    main()
jseabold/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)

from time import time
from scipy import sparse
from scipy import linalg

from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso


###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")

# 200 samples x 5000 features; X_sp is the same data in sparse COO format
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)

alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)

t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))

# The two code paths should agree up to solver tolerance
print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))

###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")

# Sparsify the data by zeroing small entries, then convert to CSC
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()

print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)

t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))

print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/datasets/__init__.py | 61 | 3734 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
ningchi/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Return the integer id of article *k*, resolving redirects first.

    Unknown articles are assigned the next free id and added to index_map.
    """
    resolved = redirects.get(k, k)
    if resolved not in index_map:
        index_map[resolved] = len(index_map)
    return index_map[resolved]
# Length of the shared "http://dbpedia.org/resource/" URI prefix.
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
# Drops the leading "<" + prefix and the trailing ">" of an NT URI token.
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)


def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix."""
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it"""
    redirects = {}
    print("Parsing the NT redirect file")
    for l, line in enumerate(BZ2File(redirects_filename)):
        split = line.split()
        # Each NT line has 4 tokens: <source> <predicate> <target> .
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # 'seen' guards against redirect cycles
        seen = set([source])
        while True:
            transitive_target = target
            target = redirects.get(target)
            # stop at the end of a chain or when a cycle is detected
            if target is None or target in seen:
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).

    limit: stop after this many link lines (None reads everything).
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)

    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        # Each NT line has 4 tokens: <source> <predicate> <target> .
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

        if limit is not None and l >= limit - 1:
            break

    print("Computing the adjacency matrix")
    # LIL format is efficient for incremental construction; converted to
    # CSR below for fast arithmetic.
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest compenents of the the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Principal-eigenvector (PageRank-style) scores via power iteration.

    Implementation based on the NetworkX project (BSD licensed too)
    with copyrights by:
      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    n = X.shape[0]
    X = X.copy()
    row_sums = np.asarray(X.sum(axis=1)).ravel()

    print("Normalizing the graph")
    # Scale each non-empty CSR row in place so it sums to 1.
    for row in row_sums.nonzero()[0]:
        X.data[X.indptr[row]:X.indptr[row + 1]] *= 1.0 / row_sums[row]
    # Dangling nodes (empty rows) redistribute their mass uniformly.
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()

    scores = np.ones(n, dtype=np.float32) / n  # initial guess
    for iteration in range(max_iter):
        print("power iteration #%d" % iteration)
        prev_scores = scores
        scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
                  + (1 - alpha) * prev_scores.sum() / n)
        # check convergence: normalized l_inf norm of the update
        scores_max = np.abs(scores).max()
        if scores_max == 0.0:
            scores_max = 1.0
        err = np.abs(scores - prev_scores).max() / scores_max
        print("error: %0.6f" % err)
        if err < n * tol:
            return scores

    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
rcompton/black-market-recommender-systems | bmrs/parsers/silkroad2.py | 1 | 3561 | #!/usr/bin/python3
# coding: utf-8
from bs4 import BeautifulSoup
import re
import pandas as pd
import dateutil
import os
import traceback
import unicodedata as ud
import logging
FORMAT = '%(asctime)-15s %(levelname)-6s %(message)s'
DATE_FORMAT = '%b %d %H:%M:%S'
formatter = logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
DATA_DIR = '/home/aahu/Desktop/silkroad2/'
RESULT_DIR = 'data/silkroad2/'
if not os.path.exists(RESULT_DIR):
os.mkdir(RESULT_DIR)
def html_to_df(fname, fdate, cat):
    """
    parse a silkroad2 html file

    Returns a DataFrame with one row per listing (listing title, price in
    BTC, vendor, ship-from/to, category, scrape date), or None when the
    file cannot be decoded or contains no item divs.
    """
    try:
        soup = BeautifulSoup(open(fname, encoding='utf-8', errors='ignore'))
    except UnicodeDecodeError:
        logger.info('UnicodeDecodeError... meh {}'.format(fname))
        return
    items = soup.find_all('div', {'class', 'item'})
    if not items:
        logger.warning('no items in {}'.format(fname))
        return
    l = []
    for item in items:
        # skip divs without a title (ads, pagination blocks, etc. -- TODO confirm)
        if not item.find('div', {'class', 'item_title'}):
            continue
        listing = item.find('div', {'class', 'item_title'}).find('a').text
        # price may live in either 'price' or 'price_big'
        price = item.find('div', {'class', 'price'})
        if not price:
            price = item.find('div', {'class', 'price_big'})
        if not price:
            price = None
        else:
            price = price.text
        dtag = item.find('div', {'class', 'item_details'})
        vtag = item.find('div', {'class', 'vendor'})
        # vendor: prefer the 'vendor' div, fall back to 'item_details'
        vendor = None
        if vtag:
            if vtag.find('a'):
                vendor = vtag.find('a').text
        if not vendor:
            if dtag:
                if dtag.find('a'):
                    vendor = dtag.find('a').text
        # shipping origin/destination: prefer the 'shipping' div, fall back
        # to 'item_details'
        ships_from = None
        ships_to = None
        stag = item.find('div', {'class', 'shipping'})
        if stag:
            try:
                sl = stag.text.split('\n')
                ships_from = [x for x in sl if 'ships from:' in x.lower()][0]
                # NOTE(review): the filter above is case-insensitive but this
                # replace is case-sensitive -- confirm the label casing.
                ships_from = ships_from.replace('ships from:', '').strip()
                ships_to = [x for x in sl if 'ships to:' in x.lower()][0].strip()
                ships_to = ships_to.replace('ships to:', '').strip()
            except:
                logger.info(stag)
        else:
            if dtag:
                try:
                    sl = dtag.text.split('\n')
                    ships_from = [x for x in sl if 'ships from:' in x.lower()][0]
                    ships_from = ships_from.replace('ships from:', '').strip()
                    ships_to = [x for x in sl if 'ships to:' in x.lower()][0].strip()
                    ships_to = ships_to.replace('ships to:', '').strip()
                except:
                    logger.info(dtag)
        d = {}
        d['listing'] = listing
        d['price_btc'] = price
        d['vendor'] = vendor
        d['ships_from'] = ships_from
        d['ships_to'] = ships_to
        d['category'] = cat
        d['scrape_date'] = fdate
        l.append(d)
    return pd.DataFrame(l)
def main():
    """Parse every dated scrape under DATA_DIR and write one TSV per date
    into RESULT_DIR (directory names are parseable dates)."""
    for datestr in os.listdir(DATA_DIR):
        fdate = dateutil.parser.parse(datestr).date()
        l = []
        datedir = os.path.join(DATA_DIR, datestr)
        catdir = os.path.join(datedir, 'categories')
        # skip scrape dates that have no category listing pages
        if not os.path.exists(catdir):
            continue
        logger.info(catdir)
        # NOTE(review): 'l' is initialised twice; the second one wins.
        l = []
        # one subdirectory per category, one html file per listing page
        for cat in os.listdir(catdir):
            dname = os.path.join(catdir, cat)
            for f in os.listdir(dname):
                fname = os.path.join(dname, f)
                catf = html_to_df(fname, fdate=fdate, cat=cat)
                l.append(catf)
        df = pd.concat(l)
        outname = 'silkroad2_' + fdate.isoformat() + '.tsv'
        df.to_csv(os.path.join(RESULT_DIR, outname), '\t', index=False)
        logger.info('wrote {0} lines to: {1}'.format(len(df), outname))
# Run the parser only when executed as a script
if __name__ == '__main__':
    main()
| gpl-3.0 |
Salahub/uwaterloo-igem-2015 | models/tridimensional/docking_validation/csv_results_clusterscore.py | 6 | 10637 | import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.cluster.hierarchy as hac
import scipy.spatial.distance as scidist
from csv_results import csv_header_bugfix
from constants import CSV_HEADER, DNA_ALPHABET
from utility import safe_mkdir
def csv_load(fullpath):
    """Loads csv data into memory

    Args:
        fullpath: full path to file (e.g. root/some_dir/some_file.csv)
    Returns:
        tuple: header, and a list of data rows (lists of strings)
    """
    assert fullpath[-4:] == '.csv'
    csv_header = None
    csv_data = []
    with open(fullpath, 'rb') as f:
        for i, row in enumerate(csv.reader(f)):
            if i == 0:
                # make sure file has header and that it matches expected header start
                assert CSV_HEADER[0] == row[0]
                csv_header = row
            else:
                csv_data.append(row)
    # legacy header bug fix check (64pam run but header is for 256pam)
    if csv_header[3] == 'PAM_4' and csv_data[0][3] not in DNA_ALPHABET:
        csv_header.pop(3)
        csv_header_bugfix(fullpath)
    return csv_header, csv_data
def csv_to_dict(fullpath, keys=['Final DNA']):
    """Loads csv data into memory, then converts the data into dictionary format

    Args:
        fullpath: full path to file (e.g. root/some_dir/some_file.csv)
        keys: list of keys (referencing csv column stats like 'Final DNA' score) to make up the dictionary
    Returns:
        csv in dictionary format where stats reference dictionary: {statname: {pam: stat value} } -- see example
    Example dictionary:
        {'Final DNA':
            {'aaaa': 1234.56,
             ...
             'tttt': 4321.65}}

    NOTE(review): keys=['Final DNA'] is a mutable default argument; safe
    here because it is only iterated, never mutated.
    """
    csv_header, csv_data = csv_load(fullpath)
    # map each requested stat name to its column index
    column_index_dict = {key: csv_header.index(key) for key in keys}  # select columns for referencing data
    # the 'PAM_*' columns each hold one base; concatenated they form the PAM
    pam_indices = [i for i, elem in enumerate(csv_header) if 'PAM_' in elem]  # use to concatenate pam columns
    csv_dict = {}
    for key in keys:
        csv_dict[key] = {}
        for row in csv_data:
            pam = ''.join([row[i] for i in pam_indices])  # concatenate pam
            csv_dict[key][pam] = float(row[column_index_dict[key]])  # get pam's stat value
    csv_dict['header'] = csv_header
    return csv_dict
def get_cluster_linkage(data_to_cluster):
    """Compute a hierarchical clustering of the vectorized csv data.

    Args:
        data_to_cluster: list of observation vectors (one per PAM), as
            produced from the csv_to_dict() output
    Returns:
        scipy linkage matrix (single linkage, euclidean metric) encoding
        the hierarchical clustering

    See documentation:
        http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html
    """
    return hac.linkage(data_to_cluster, method='single', metric='euclidean')
def plot_cluster_dendrogram(cluster_linkage, keylist, threshold='default'):
    """Dendrograms are representations of heirarchical clusters

    Plots the dendrogram for cluster_linkage, labelling leaves with the
    corresponding entries of keylist; when a numeric threshold is given it
    is drawn as a dashed horizontal cut line.  Blocks on plt.show().

    See documentation:
    http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html
    """
    # map each leaf index back to its PAM string label
    leaf_label_map = lambda x: keylist[x]
    plt.figure()
    dendrodata = hac.dendrogram(cluster_linkage, color_threshold=threshold, leaf_label_func=leaf_label_map,
                                leaf_rotation=45.0, leaf_font_size=8)
    # TODO find a way to make the extra information below less cramped when plotting
    """# segment to plot distances between clusters
    for i, d in zip(dendrodata['icoord'], dendrodata['dcoord']):
        x = 0.5 * sum(i[1:3])
        y = d[1]
        plt.plot(x, y, 'ro')
        plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
                     textcoords='offset points',
                     va='top', ha='center')"""
    if threshold != 'default':
        plt.axhline(threshold, color='k', linestyle='--', label='threshold')
    plt.show()
def cluster_csv_data(csv_dict, stat_to_cluster='Final DNA', plot_dendrogram_flag=True):
    """Clusters linkage object by applying a threshold to get a flat clustering

    Args:
        csv_dict: dictionary returned from the csv_to_dict() function
        stat_to_cluster: [default: 'Final DNA'] key for csv_dict corresponding to a statistic
        plot_dendrogram_flag: plot dendrogram if True
    Returns:
        csv data for that statistic in a clustered dictionary format (see example)
    Example of returned dictionary:
        {pam:
            {'stat_value': float,  <-- data value that's been clustered
             'stat_cluster': int,  <-- cluster rank (1 to n)
             'stat_cluster_centroid': float},  <-- average value of associated cluster rank
         ... }  <-- for all pams
    See documentation:
        http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html
    """
    # prepare data and compute distances
    csv_data_as_keyvalue = csv_dict[stat_to_cluster]
    data_to_cluster = [[csv_data_as_keyvalue[key]] for key in csv_data_as_keyvalue.keys()]  # ignore pams, keep order
    pair_dists = scidist.pdist(data_to_cluster, metric='euclidean')
    # determine cluster membership
    linkage = get_cluster_linkage(data_to_cluster)
    # cut height: half the standard deviation of all pairwise distances
    threshold = 0.5 * np.std(pair_dists)
    cluster_membership_array = hac.fcluster(linkage, threshold, criterion='distance')
    # compute cluster centroid dictionary (sum per cluster, then divide by count)
    lens_dict = {cluster_idx: 0 for cluster_idx in set(cluster_membership_array)}
    centroid_dict = {cluster_idx: 0.0 for cluster_idx in set(cluster_membership_array)}
    for i, cluster_idx in enumerate(cluster_membership_array):
        # NOTE(review): Python 2 only -- dict.values() is indexable and, for an
        # unmodified dict, enumerates in the same order as .keys(); on Python 3
        # this line raises TypeError.
        centroid_dict[cluster_idx] += csv_data_as_keyvalue.values()[i]
        lens_dict[cluster_idx] += 1
    for cluster_idx in centroid_dict.keys():
        centroid_dict[cluster_idx] = centroid_dict[cluster_idx] / lens_dict[cluster_idx]  # take average
    # revise cluster membership so that 'best' means cluster 1 instead of the last cluster (lower energy is better)
    order_map = {cluster_idx: 0 for cluster_idx in set(cluster_membership_array)}
    # NOTE(review): inverting centroid->cluster assumes centroids are unique;
    # two clusters with identical centroids would collide here -- verify.
    centroid_dict_reversed = {centroid_dict[cluster_idx]: cluster_idx for cluster_idx in centroid_dict.keys()}
    # NOTE(review): the rank assignment below relies on set() iterating the
    # small integer cluster ids in ascending order (true in CPython in
    # practice, but not a guaranteed contract) -- verify.
    for cluster_idx in set(cluster_membership_array):
        min_centroid = np.min(centroid_dict_reversed.keys())
        order_map[cluster_idx] = centroid_dict_reversed[min_centroid]
        del centroid_dict_reversed[min_centroid]
    order_map = {order_map[key]: key for key in order_map.keys()}  # invert order map so it functions as intended
    transform = lambda cluster_idx: order_map[cluster_idx]
    for i, elem in enumerate(cluster_membership_array):  # transform cluster_membership_array references
        cluster_membership_array[i] = transform(elem)
    # re-key the centroids with the new (rank-ordered) cluster ids
    centroid_dict = {order_map[cluster_idx]: centroid_dict[cluster_idx] for cluster_idx in centroid_dict.keys()}
    # assign cluster membership (same .keys() order as data_to_cluster above)
    clustered_data = {}
    for i, key in enumerate(csv_data_as_keyvalue.keys()):
        clustered_data[key] = {'stat_value': csv_data_as_keyvalue[key],
                               'stat_cluster': cluster_membership_array[i],
                               'stat_cluster_centroid': centroid_dict[cluster_membership_array[i]]}
    # conditionally plot dendrogram
    if plot_dendrogram_flag:
        plot_cluster_dendrogram(linkage, csv_data_as_keyvalue.keys(), threshold=threshold)
    return clustered_data
def write_clustered_csv(fullpath_input, dir_output=None, stats_to_cluster=['Final DNA'], plot_dendrogram_flag=False):
    """Clusters specific data from an input csv and writes a new csv with appended clustering information

    Args:
        fullpath_input: full path to the input csv (must end in '.csv')
        dir_output: directory where the output csv will be placed
            (default: alongside the input file)
        stats_to_cluster: list of stats to cluster
            (NOTE: mutable default -- safe only because it is never mutated here)
        plot_dendrogram_flag: selectively plot the dendrogram of clusters
    Returns:
        full path to new csv with appended clustering information
    """
    # IO preparation
    # NOTE(review): asserts are stripped under ``python -O``; raise ValueError
    # instead if this extension check must always run.
    assert fullpath_input[-4:] == '.csv'
    dirpath, filename_input = os.path.split(fullpath_input)
    filename_output = filename_input[:-4] + '_clustered.csv'
    if dir_output is None:
        fullpath_output = os.path.join(dirpath, filename_output)
    else:
        safe_mkdir(dir_output)
        fullpath_output = os.path.join(dir_output, filename_output)
    # load data for clustering
    csv_dict = csv_to_dict(fullpath_input, keys=stats_to_cluster)
    csv_header = csv_dict['header']
    pam_indices = [i for i, elem in enumerate(csv_header) if 'PAM_' in elem]  # use to concatenate pam columns
    # cluster each stat separately and track header changes (two new columns per stat)
    cluster_dict = {}
    csv_cluster_header = []
    for stat in stats_to_cluster:
        cluster_dict[stat] = cluster_csv_data(csv_dict, stat_to_cluster=stat, plot_dendrogram_flag=plot_dendrogram_flag)
        csv_cluster_header.append('%s cluster' % stat)
        csv_cluster_header.append('%s cluster centroid' % stat)
    # write clustered data to csv
    data_to_append = ['stat_cluster', 'stat_cluster_centroid']
    # NOTE: Python 2 idiom -- the output csv is opened in binary mode ('wb');
    # on Python 3 this would need mode 'w' with newline=''.
    with open(fullpath_input, 'r') as csvin:
        reader = csv.reader(csvin)
        with open(fullpath_output, 'wb') as csvout:
            writer = csv.writer(csvout)
            for i, row in enumerate(reader):
                if i == 0:
                    # header row: original columns plus the new cluster columns
                    writer.writerow(csv_header + csv_cluster_header)
                else:
                    # NOTE(review): in Python 2 the comprehension variable ``i``
                    # leaks and shadows the row index (harmless here because
                    # enumerate reassigns it each iteration).
                    pam = ''.join([row[i] for i in pam_indices])
                    cluster_data_to_append = []
                    for stat in stats_to_cluster:
                        cluster_data_to_append += [cluster_dict[stat][pam][key] for key in data_to_append]
                    writer.writerow(row + cluster_data_to_append)
    print "Finished writing cluster results to %s" % fullpath_output
    return fullpath_output
if __name__ == '__main__':
    # argument parsing
    parser = argparse.ArgumentParser(description='Cluster given csv data into a new clustered csv.')
    parser.add_argument('--path_input', metavar='C', type=str, help='directory of input csv file')
    parser.add_argument('--dir_output', metavar='S', nargs='?', default=None,
                        type=str, help='directory to place output csv (default: same as input)')
    # NOTE(review): with type=str, a bare ``--plot_on`` yields the boolean
    # const True, but ``--plot_on False`` yields the *string* "False", which
    # is truthy downstream; ``action='store_true'`` would be the conventional
    # switch -- confirm before changing the CLI.
    parser.add_argument('--plot_on', metavar='F', nargs='?', const=True, default=False,
                        type=str, help='[switch] plot cluster dendrogram (default: no plot)')
    args = parser.parse_args()
    # write to csv
    write_clustered_csv(args.path_input, dir_output=args.dir_output, plot_dendrogram_flag=args.plot_on)
| mit |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/numpy/core/function_base.py | 18 | 12340 | from __future__ import division, absolute_import, print_function
import warnings
import operator
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
TooHardError,asanyarray)
__all__ = ['logspace', 'linspace', 'geomspace']
def _index_deprecate(i, stacklevel=2):
try:
i = operator.index(i)
except TypeError:
msg = ("object of type {} cannot be safely interpreted as "
"an integer.".format(type(i)))
i = int(i)
stacklevel += 1
warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
return i
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """
    Return evenly spaced numbers over a specified interval.
    Returns `num` evenly spaced samples, calculated over the
    interval [`start`, `stop`].
    The endpoint of the interval can optionally be excluded.
    Parameters
    ----------
    start : scalar
        The starting value of the sequence.
    stop : scalar
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced samples, so that `stop` is excluded.  Note that the step
        size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (`samples`, `step`), where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.
        .. versionadded:: 1.9.0
    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float, optional
        Only returned if `retstep` is True
        Size of spacing between samples.
    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).
    logspace : Samples uniformly distributed in log space.
    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([ 2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)
    Graphical illustration:
    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()
    """
    # 2016-02-25, 1.12
    num = _index_deprecate(num)
    if num < 0:
        raise ValueError("Number of samples, %s, must be non-negative." % num)
    # Number of steps between samples: with endpoint=True the last sample is
    # `stop` itself, so there are num - 1 intervals rather than num.
    div = (num - 1) if endpoint else num
    # Convert float/complex array scalars to float, gh-3504
    # and make sure one can use variables that have an __array_interface__, gh-6634
    start = asanyarray(start) * 1.0
    stop = asanyarray(stop) * 1.0
    # Work in the common inferred type; cast to the requested dtype only on
    # return so intermediate arithmetic keeps full precision.
    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt
    y = _nx.arange(0, num, dtype=dt)
    delta = stop - start
    # In-place multiplication y *= delta/div is faster, but prevents the multiplicant
    # from overriding what class is produced, and thus prevents, e.g. use of Quantities,
    # see gh-7142. Hence, we multiply in place only for standard scalar types.
    _mult_inplace = _nx.isscalar(delta)
    if num > 1:
        step = delta / div
        if step == 0:
            # Special handling for denormal numbers, gh-5437
            # (divide first, then scale, to avoid the step underflowing to 0)
            y /= div
            if _mult_inplace:
                y *= delta
            else:
                y = y * delta
        else:
            if _mult_inplace:
                y *= step
            else:
                y = y * step
    else:
        # 0 and 1 item long sequences have an undefined step
        step = NaN
        # Multiply with delta to allow possible override of output class.
        y = y * delta
    y += start
    if endpoint and num > 1:
        # Overwrite the final sample so the endpoint is exact despite any
        # floating-point rounding accumulated above.
        y[-1] = stop
    if retstep:
        return y.astype(dtype, copy=False), step
    else:
        return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
geomspace : Similar to logspace, but with endpoints specified directly.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
def geomspace(start, stop, num=50, endpoint=True, dtype=None):
    """
    Return numbers spaced evenly on a log scale (a geometric progression).
    This is similar to `logspace`, but with endpoints specified directly.
    Each output sample is a constant multiple of the previous.
    Parameters
    ----------
    start : scalar
        The starting value of the sequence.
    stop : scalar
        The final value of the sequence, unless `endpoint` is False.
        In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.
    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.
    See Also
    --------
    logspace : Similar to geomspace, but with endpoints specified using log
               and base.
    linspace : Similar to geomspace, but with arithmetic instead of geometric
               progression.
    arange : Similar to linspace, with the step size specified instead of the
             number of samples.
    Notes
    -----
    If the inputs or dtype are complex, the output will follow a logarithmic
    spiral in the complex plane.  (There are an infinite number of spirals
    passing through two points; the output will follow the shortest such path.)
    Examples
    --------
    >>> np.geomspace(1, 1000, num=4)
    array([    1.,    10.,   100.,  1000.])
    >>> np.geomspace(1, 1000, num=3, endpoint=False)
    array([   1.,   10.,  100.])
    >>> np.geomspace(1, 1000, num=4, endpoint=False)
    array([   1.        ,    5.62341325,   31.6227766 ,  177.827941  ])
    >>> np.geomspace(1, 256, num=9)
    array([   1.,    2.,    4.,    8.,   16.,   32.,   64.,  128.,  256.])
    Note that the above may not produce exact integers:
    >>> np.geomspace(1, 256, num=9, dtype=int)
    array([  1,   2,   4,   7,  16,  32,  63, 127, 256])
    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
    array([  1,   2,   4,   8,  16,  32,  64, 128, 256])
    Negative, decreasing, and complex inputs are allowed:
    >>> np.geomspace(1000, 1, num=4)
    array([ 1000.,   100.,    10.,     1.])
    >>> np.geomspace(-1000, -1, num=4)
    array([-1000.,  -100.,   -10.,    -1.])
    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
    array([ 0.   +1.j,  0.  +10.j,  0. +100.j,  0.+1000.j])
    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
    array([-1.00000000+0.j        , -0.70710678+0.70710678j,
            0.00000000+1.j        ,  0.70710678+0.70710678j,
            1.00000000+0.j        ])
    Graphical illustration of ``endpoint`` parameter:
    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> y = np.zeros(N)
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
    >>> plt.axis([0.5, 2000, 0, 3])
    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
    >>> plt.show()
    """
    # log10(0) is undefined, so zero can never appear in a geometric sequence
    if start == 0 or stop == 0:
        raise ValueError('Geometric sequence cannot include zero')
    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt
    else:
        # complex to dtype('complex128'), for instance
        dtype = _nx.dtype(dtype)
    # Avoid negligible real or imaginary parts in output by rotating to
    # positive real, calculating, then undoing rotation
    # NOTE(review): the .real/.imag accesses below assume start/stop are
    # numeric scalars (Python or NumPy), not arrays -- confirm callers.
    out_sign = 1
    if start.real == stop.real == 0:
        # Both endpoints purely imaginary: compute along the imaginary axis
        # and rotate the result back by multiplying with 1j at the end.
        start, stop = start.imag, stop.imag
        out_sign = 1j * out_sign
    if _nx.sign(start) == _nx.sign(stop) == -1:
        # Both endpoints negative: compute with the positive mirror values.
        start, stop = -start, -stop
        out_sign = -out_sign
    # Promote both arguments to the same dtype in case, for instance, one is
    # complex and another is negative and log would produce NaN otherwise
    # (adding a zero of the other operand's type promotes without changing
    # the value; the second line sees the already-promoted `start`, which is
    # still a value-preserving no-op).
    start = start + (stop - stop)
    stop = stop + (start - start)
    if _nx.issubdtype(dtype, _nx.complexfloating):
        start = start + 0j
        stop = stop + 0j
    log_start = _nx.log10(start)
    log_stop = _nx.log10(stop)
    result = out_sign * logspace(log_start, log_stop, num=num,
                                 endpoint=endpoint, base=10.0, dtype=dtype)
    # Cast again: multiplying by out_sign may have promoted the dtype
    # (e.g. to complex when out_sign is 1j).
    return result.astype(dtype)
| mit |
lbdreyer/iris | docs/iris/src/conf.py | 2 | 10980 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# -*- coding: utf-8 -*-
#
# Iris documentation build configuration file, created by
# sphinx-quickstart on Tue May 25 13:26:23 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# ----------------------------------------------------------------------------
import ntpath
import os
import sys
# function to write useful output to stdout, prefixing the source.
def autolog(message):
    """Print *message* to stdout, prefixed with this file's basename in brackets."""
    prefix = ntpath.basename(__file__)
    print("[{}] {}".format(prefix, message))
# -- Are we running on the readthedocs server, if so do some setup -----------
# ReadTheDocs builds set the READTHEDOCS environment variable to the literal
# string "True", hence the string comparison below.
on_rtd = os.environ.get("READTHEDOCS") == "True"
if on_rtd:
    autolog("Build running on READTHEDOCS server")
    # list all the READTHEDOCS environment variables that may be of use
    # at some point
    autolog("Listing all environment variables on the READTHEDOCS server...")
    for item, value in os.environ.items():
        autolog("[READTHEDOCS] {} = {}".format(item, value))
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import datetime
import warnings
# custom sphinx extensions
sys.path.append(os.path.abspath("sphinxext"))
# add some sample files from the developers guide..
sys.path.append(os.path.abspath(os.path.join("developers_guide")))
# Why isn't the iris package path added here so it is discoverable too?  We
# don't need to: the sphinxext extension that generates the api rst knows
# where the source is.  If it were added, the travis build would likely fail.
# -- Project information -----------------------------------------------------
project = "Iris"
# define the copyright information for latex builds. Note, for html builds,
# the copyright exists directly inside "_templates/layout.html"
upper_copy_year = datetime.datetime.now().year
copyright = "Iris Contributors"
author = "Iris Developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import iris
# The short X.Y version.
if iris.__version__ == "dev":
    version = "dev"
else:
    # major.minor.patch-dev -> major.minor.patch
    version = ".".join(iris.__version__.split("-")[0].split(".")[:3])
# The full version, including alpha/beta/rc tags.
release = iris.__version__
autolog("Iris Version = {}".format(version))
autolog("Iris Release = {}".format(release))
# -- General configuration ---------------------------------------------------
# Create a variable that can be inserted in the rst "|copyright_years|".
# You can add more variables here if needed
rst_epilog = """
.. |copyright_years| replace:: {year_range}
""".format(
    year_range="2010 - {}".format(upper_copy_year)
)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# The final three entries are project-local extensions resolved via the
# "sphinxext" directory appended to sys.path above.
extensions = [
    "sphinx.ext.todo",
    "sphinx.ext.duration",
    "sphinx.ext.coverage",
    "sphinx.ext.viewcode",
    "sphinx.ext.autosummary",
    "sphinx.ext.doctest",
    "sphinx.ext.extlinks",
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx_copybutton",
    "sphinx.ext.napoleon",
    "sphinx_panels",
    # TODO: Spelling extension disabled until the dependencies can be included
    # "sphinxcontrib.spelling",
    "sphinx_gallery.gen_gallery",
    "matplotlib.sphinxext.mathmpl",
    "matplotlib.sphinxext.plot_directive",
    # better api documentation (custom)
    "custom_class_autodoc",
    "custom_data_autodoc",
    "generate_package_rst",
]
# -- panels extension ---------------------------------------------------------
# See https://sphinx-panels.readthedocs.io/en/latest/
# -- Napoleon extension -------------------------------------------------------
# See https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True  # includes dunders in api doc
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
# -- spelling extension -------------------------------------------------------
# See https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html
# NOTE: the spelling extension is currently commented out of ``extensions``
# above; these settings only take effect once it is re-enabled.
spelling_lang = "en_GB"
# The lines in this file must only use line feeds (no carriage returns).
spelling_word_list_filename = ["spelling_allow.txt"]
spelling_show_suggestions = False
spelling_show_whole_line = False
spelling_ignore_importable_modules = True
spelling_ignore_python_builtins = True
# -- copybutton extension -----------------------------------------------------
# See https://sphinx-copybutton.readthedocs.io/en/latest/
copybutton_prompt_text = ">>> "
# sphinx.ext.todo configuration
# See https://www.sphinx-doc.org/en/master/usage/extensions/todo.html
todo_include_todos = True
# api generation configuration
autodoc_member_order = "groupwise"
autodoc_default_flags = ["show-inheritance"]
autosummary_generate = True
autosummary_imported_members = True
autopackage_name = ["iris"]
autoclass_content = "init"
modindex_common_prefix = ["iris"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# -- intersphinx extension ----------------------------------------------------
# See https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
intersphinx_mapping = {
    "cartopy": ("https://scitools.org.uk/cartopy/docs/latest/", None),
    "matplotlib": ("https://matplotlib.org/stable/", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "python": ("https://docs.python.org/3/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
}
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- plot_directive extension -------------------------------------------------
# See https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html#options
plot_formats = [
    ("png", 100),
]
# -- Extlinks extension -------------------------------------------------------
# See https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
extlinks = {
    "issue": ("https://github.com/SciTools/iris/issues/%s", "Issue #"),
    "pull": ("https://github.com/SciTools/iris/pull/%s", "PR #"),
}
# -- Doctest ("make doctest")--------------------------------------------------
doctest_global_setup = "import iris"
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# Image paths below are relative to this configuration directory.
html_logo = "_static/iris-logo-title.png"
html_favicon = "_static/favicon.ico"
html_theme = "sphinx_rtd_theme"
# Theme-specific options for sphinx_rtd_theme.
html_theme_options = {
    "display_version": True,
    "style_external_links": True,
    # Was the *string* "True" (merely truthy); use a real boolean for
    # consistency with the sibling keys.
    "logo_only": True,
}
# Extra variables made available to the HTML templates.
html_context = {
    "copyright_years": "2010 - {}".format(upper_copy_year),
    # menu_links and menu_links_name are used in _templates/layout.html
    # to include some nice icons. See http://fontawesome.io for a list of
    # icons (used in the sphinx_rtd_theme)
    "menu_links_name": "Support",
    # Each entry is an (html-label, url) pair; the label may embed a Font
    # Awesome icon tag.
    "menu_links": [
        (
            '<i class="fa fa-github fa-fw"></i> Source Code',
            "https://github.com/SciTools/iris",
        ),
        (
            '<i class="fa fa-comments fa-fw"></i> Users Google Group',
            "https://groups.google.com/forum/#!forum/scitools-iris",
        ),
        (
            '<i class="fa fa-comments fa-fw"></i> Developers Google Group',
            "https://groups.google.com/forum/#!forum/scitools-iris-dev",
        ),
        (
            '<i class="fa fa-question fa-fw"></i> StackOverflow for "How Do I?"',
            "https://stackoverflow.com/questions/tagged/python-iris",
        ),
        (
            '<i class="fa fa-book fa-fw"></i> Legacy Documentation',
            "https://scitools.org.uk/iris/docs/v2.4.0/index.html",
        ),
    ],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_style = "theme_override.css"
# url link checker. Some links work but report as broken, lets ignore them.
# See https://www.sphinx-doc.org/en/1.2/config.html#options-for-the-linkcheck-builder
linkcheck_ignore = [
    "http://cfconventions.org",
    "http://code.google.com/p/msysgit/downloads/list",
    "http://effbot.org",
    "https://github.com",
    "http://www.personal.psu.edu/cab38/ColorBrewer/ColorBrewer_updates.html",
    "http://schacon.github.com/git",
    "http://scitools.github.com/cartopy",
    "http://www.wmo.int/pages/prog/www/DPFS/documents/485_Vol_I_en_colour.pdf",
    "https://software.ac.uk/how-cite-software",
    "http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml",
]
# list of sources to exclude from the build.
exclude_patterns = []
# -- sphinx-gallery config ----------------------------------------------------
# See https://sphinx-gallery.github.io/stable/configuration.html
sphinx_gallery_conf = {
    # path to your example scripts
    "examples_dirs": ["../gallery_code"],
    # path to where to save gallery generated output
    "gallery_dirs": ["generated/gallery"],
    # filename pattern for the files in the gallery
    "filename_pattern": "/plot_",
    # filename pattern to ignore in the gallery
    "ignore_pattern": r"__init__\.py",
}
# -----------------------------------------------------------------------------
# Remove matplotlib agg warnings from generated doc when using plt.show
warnings.filterwarnings(
    "ignore",
    category=UserWarning,
    message="Matplotlib is currently using agg, which is a"
    " non-GUI backend, so cannot show the figure.",
)
# -- numfig options (built-in) ------------------------------------------------
# Enable numfig (automatic numbering of figures, tables and code blocks).
numfig = True
numfig_format = {
    "code-block": "Example %s",
    "figure": "Figure %s",
    "section": "Section %s",
    "table": "Table %s",
}
| lgpl-3.0 |
toastedcornflakes/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA

# Generate 20 random integer-valued 2-D points as the ground-truth layout.
n_samples = 20
seed = np.random.RandomState(seed=3)
# ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; it was only
# ever an alias of the builtin ``float``, so this is behavior-identical.
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data (single scalar mean over all coordinates)
X_true -= X_true.mean()

similarities = euclidean_distances(X_true)

# Add symmetric noise with a zero diagonal to the similarities.
# NOTE(review): this uses the global ``np.random`` rather than the seeded
# RandomState above, so the example is not fully reproducible -- confirm
# whether that is intentional before changing it.
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise

# Metric MDS on the precomputed (noisy) dissimilarity matrix.
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_

# Non-metric MDS, initialised from the metric solution.
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)

# Rescale the data so both embeddings have the same total variance as the
# true configuration.
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())

# Rotate all three point sets into a common orientation via PCA.
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)

fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
            label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)

# Invert similarities into edge intensities (larger distance -> fainter edge).
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0

# Plot an edge between every pair of true positions, shaded by similarity.
# (An unused ``np.where(pos)`` index computation was removed here.)
# a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.Blues,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
kpolimis/sklearn-forest-ci | doc/conf.py | 2 | 10581 | # -*- coding: utf-8 -*-
#
# project-template documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
    import gen_rst
except Exception:
    # gen_rst is an optional build helper; the docs still build without it.
    # A bare ``except:`` would also swallow SystemExit and KeyboardInterrupt,
    # so narrow the clause to Exception while keeping the best-effort intent.
    pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE(review): sphinx.ext.pngmath was removed in Sphinx 1.8; newer Sphinx
# requires sphinx.ext.imgmath (or mathjax) instead -- confirm the pinned
# Sphinx version before upgrading.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'numpydoc',
    'sphinx.ext.pngmath',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx_gallery.gen_gallery',
    'sphinx.ext.autosummary'
]
sphinx_gallery_conf = {
    # path to your examples scripts
    'examples_dirs' : '../examples',
    # path where to save gallery generated examples
    'gallery_dirs' : 'auto_examples'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
# NOTE(review): `project` is also used below to locate
# <repo>/<project>/version.py -- keep it in sync with the package name.
project = u'forestci'
copyright = u'2016, Kivan Polimis, Ariel Rokem, Bryna Hazelton, The University of Washington eScience Institute'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# NOTE(review): relies on `sphinx_rtd_theme` being imported earlier in this
# file (the import is not visible in this chunk) -- verify it is present.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Contents end up under _static/ in the built HTML tree.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skl-forest-ci-doc'
# -- Options for LaTeX output ---------------------------------------------
# All entries below are left at their Sphinx defaults (commented out).
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'skl-forest-ci.tex', u'Confidence Intervals for Scikit Learn Forest algorithms: Documentation',
     u'Kivan Polimis, Ariel Rokem, & Bryna Hazelton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'skl-forest-ci', u'Confidence Intervals for Scikit Learn Forest algorithms: Documentation',
     [u'Kivan Polimis', u'Ariel Rokem', u'Bryna Hazelton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'skl-forest-ci', u'skl-forest-ci Documentation',
     u'Kivan Polimis, Ariel Rokem, Bryna Hazelton', 'skl-forest-ci', 'Confidence Intervals for Scikit Learn Forest Algorithms',
     'Miscellaneous'),
]
def generate_example_rst(app, what, name, obj, options, lines):
    """Ensure ``modules/generated/<name>.examples`` exists (possibly empty).

    Registered on the 'autodoc-process-docstring' event; creating an empty
    file avoids include errors when a class/module has no examples.
    """
    path = os.path.join(app.srcdir, "modules", "generated",
                        "%s.examples" % name)
    if not os.path.exists(path):
        open(path, 'w').close()  # touch the file
# Read the package version from <repo>/<project>/version.py without importing
# the package; the exec() is expected to define `__version__` here.
currentdir = os.path.abspath(os.path.dirname(__file__))
ver_file = os.path.join(currentdir, '..', project, 'version.py')
with open(ver_file) as f:
    exec(f.read())
source_version = __version__  # set by the exec'd version.py
currentdir = os.path.abspath(os.path.dirname(__file__))
# make ./tools importable for the API-doc generation helper below
sys.path.append(os.path.join(currentdir, 'tools'))
import buildmodref
# autogenerate api documentation
# (see https://github.com/rtfd/readthedocs.org/issues/1139)
def generateapidoc(_):
    """Regenerate the API reference under docs/reference via buildmodref.

    Connected to the 'builder-inited' Sphinx event; the argument (the app)
    is unused.  Relies on module-level ``currentdir``, ``project`` and
    ``source_version`` defined above.
    """
    output_path = os.path.join(currentdir, 'reference')
    buildmodref.writeapi(project, output_path, source_version, True)
def setup(app):
    """Sphinx entry point: register this build's event handlers."""
    app.connect('autodoc-process-docstring', generate_example_rst)
    app.connect('builder-inited', generateapidoc)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): inventories are fetched at build time; consider https URLs.
intersphinx_mapping = {'http://docs.python.org/': None,
                       'http://scikit-learn.org/stable/': None}
| bsd-3-clause |
Silmathoron/NNGT | doc/examples/groups_and_metagroups.py | 1 | 3612 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Generation of multi-group networks containing metagroups """
import nngt
import nngt.generation as ng
import numpy as np
'''
Make a mixed excitatory and inhibitory population, then subdived it in subgroups
'''
num_neurons = 1000
pop = nngt.NeuralPop.exc_and_inhib(num_neurons)
# create two separated subgroups associated to two shapes where the neurons
# will be seeded
# we select 500 random nodes for the left group
left_nodes = np.random.choice([i for i in range(num_neurons)],
                              500, replace=False)
left = nngt.NeuralGroup(left_nodes, neuron_type=None)  # here we first create...
pop.add_meta_group(left, "left")  # ... then add
# right group is the complement
right_nodes = list(set(pop.ids).difference(left_nodes))
right = pop.create_meta_group(right_nodes, "right")  # here both in one call
# create another pair of metagroups
# (g1/g2 simply split the ids into two deterministic halves)
group1 = pop.create_meta_group([i for i in range(500)], "g1")
group2 = pop.create_meta_group([i for i in range(500, num_neurons)], "g2")
'''
We then create the shapes associated to the left and right groups and seed
the neurons accordingly in the network
'''
left_shape = nngt.geometry.Shape.disk(300, (-300, 0))
right_shape = nngt.geometry.Shape.rectangle(800, 200, (300, 0))
left_pos = left_shape.seed_neurons(left.size)
right_pos = right_shape.seed_neurons(right.size)
# we order the positions according to the neuron ids
# (positions[i] is the coordinate pair of neuron i)
positions = np.empty((num_neurons, 2))
for i, p in zip(left_nodes, left_pos):
    positions[i] = p
for i, p in zip(right_nodes, right_pos):
    positions[i] = p
# create network from this population
net = nngt.Network(population=pop, positions=positions)
'''
Access metagroups
'''
print(pop.meta_groups)
print(pop["left"])
'''
Plot the graph
'''
if nngt.get_config("with_plot"):
    import matplotlib.pyplot as plt
    # we plot the graph, setting the node shape from the left and right groups
    # and the color from the neuronal type (exc. and inhib.)
    nngt.plot.draw_network(net, nshape=[left, right], show_environment=False)
    plt.show()
    # further tests to make sure every configuration works
    nngt.plot.draw_network(net, nshape=[left, right], show_environment=False,
                           simple_nodes=True)
    nngt.plot.draw_network(net, nshape=["o" for _ in range(net.node_nb())],
                           show_environment=False, simple_nodes=True)
    nngt.plot.draw_network(net, nshape=["o" for _ in range(net.node_nb())],
                           show_environment=False)
    nngt.plot.draw_network(net, nshape="s", show_environment=False,
                           simple_nodes=True)
    nngt.plot.draw_network(net, nshape="s", show_environment=False)
    plt.show()
| gpl-3.0 |
JohnBSmith/JohnBSmith.github.io | templates/Matplotlib/plot-center.py | 1 | 2470 |
# A template for simple plots, viewed on screen
# and included in HTML documents.
import matplotlib as mp
import matplotlib.pyplot as plot
from numpy import arange, array
from math import pi, sin, cos, tan, exp, log
# Line widths and shared colors for all curves.
lw_grid = 1.6
lw_line = 2.2
axes_color = "#505050"
blue = [0,0.2,0.6,0.8]
green = [0,0.46,0,0.8]
magenta = [0.6,0,0.4,0.8]
bgreen = [0,0.4,0.4,0.8]
gblue = [0,0.4,0.6,0.8]
black = [0,0,0,0.8]
style = {
    "axes.linewidth": lw_grid,
    "grid.linewidth": lw_grid,
    "grid.linestyle": "solid",
    "grid.color": "#e0e0d4",
    "lines.linewidth": 2,
    # "lines.markersize": 10,
    # "xtick.labelsize": 20,
    # "ytick.labelsize": 20,
}
mp.rcParams.update(style)
fig = plot.figure()
ax = fig.add_subplot(1,1,1)
ax.spines['bottom'].set_color(axes_color)
ax.spines['top'].set_color(axes_color)
ax.spines['right'].set_color(axes_color)
ax.spines['left'].set_color(axes_color)
ax.spines['bottom'].set_zorder(10)
ax.spines['top'].set_zorder(10)
ax.spines['right'].set_zorder(10)
ax.spines['left'].set_zorder(10)
# center the axes on the origin, hide the top/right spines
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.grid()
ax.yaxis.grid()
ax.set_axisbelow(True)
ax.xaxis.set_tick_params(width=lw_grid, length=10,
    pad=8,color=axes_color,direction='inout')
ax.yaxis.set_tick_params(width=lw_grid, length=10,
    pad=8,color=axes_color,direction='inout')
ax.set_aspect('equal')
X=[-5,5]
Y=[-5,5]
ax.axis(X+Y)
# integer ticks, skipping 0 (it would collide with the centered axes)
ax.set_xticks(list(range(X[0],0,1))+list(range(1,X[1]+1,1)))
# BUGFIX: the upper bound previously used X[1]; use Y[1] for the y axis.
ax.set_yticks(list(range(Y[0],0,1))+list(range(1,Y[1]+1,1)))
def f(x): return x
def g(x): return x*x
def h(x): return 3*sin(pi*x)/(pi*x)
def fx(x): return 4*cos(x)
def fy(x): return 4*sin(x)
x = arange(-10, 10, 0.01)
# BUGFIX: under Python 3, map() returns an iterator and array(map(...))
# produces a useless 0-d object array; build lists explicitly instead.
yf = array([f(v) for v in x])
yg = array([g(v) for v in x])
yh = array([h(v) for v in x])
t = arange(0,2*pi+0.01,0.01)
xt = array([fx(v) for v in t])
yt = array([fy(v) for v in t])
ax.plot(x,yf, color=blue,   zorder=3,linewidth=lw_line)
ax.plot(x,yg, color=green,  zorder=3,linewidth=lw_line)
ax.plot(x,yh, color=magenta,zorder=3,linewidth=lw_line)
ax.plot(xt,yt,color=bgreen, zorder=3,linewidth=lw_line)
fontsize = 16
ax.text(X[1]-0.6,-0.8,"x", fontsize=fontsize, style="italic", color="#202020")
ax.text(0.2,Y[1]-0.5,"y", fontsize=fontsize, style="italic", color="#202020")
plot.savefig("plot.png",bbox_inches='tight')
# plot.savefig("plot.svg",bbox_inches='tight')
# plot.show()
| cc0-1.0 |
ChanChiChoi/scikit-learn | sklearn/tree/tests/test_tree.py | 72 | 47440 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Criteria exercised for classification / regression trees.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
# Estimator registries keyed by a human-readable name (used in messages).
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
# Names of estimators whose splitter supports sparse input.
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
                if Tree().splitter in SPARSE_SPLITTERS]
# Small hand-crafted dataset (23 samples x 14 features).
X_small = np.array([
    [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
    [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
    [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
    [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
    [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
    [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
    [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
    [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
    [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
    [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
    [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
    [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
    [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
    [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
    [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
    [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
    [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
           0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
               0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
    random_state=0, return_indicator=True, n_samples=30, n_features=10)
# sparse fixtures: mostly-zero positive matrix and a mixed-sign one
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
# Named datasets used throughout the tests below.
DATASETS = {
    "iris": {"X": iris.data, "y": iris.target},
    "boston": {"X": boston.data, "y": boston.target},
    "digits": {"X": digits.data, "y": digits.target},
    "toy": {"X": X, "y": y},
    "clf_small": {"X": X_small, "y": y_small},
    "reg_small": {"X": X_small, "y": y_small_reg},
    "multilabel": {"X": X_multilabel, "y": y_multilabel},
    "sparse-pos": {"X": X_sparse_pos, "y": y_random},
    "sparse-neg": {"X": - X_sparse_pos, "y": y_random},
    "sparse-mix": {"X": X_sparse_mix, "y": y_random},
    "zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
# attach a CSC sparse copy of every dataset for the sparse-input tests
for name in DATASETS:
    DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that two fitted ``Tree`` structures (``d`` vs ``s``, e.g.
    dense vs sparse fits) are identical node for node; ``message``
    prefixes every failure message."""
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")
    # leaves are marked with TREE_LEAF in children_right
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    """Check classification on a toy dataset."""
    for name, Tree in CLF_TREES.items():
        # default settings, then with feature subsampling
        for params in ({"random_state": 0},
                       {"max_features": 1, "random_state": 1}):
            clf = Tree(**params)
            clf.fit(X, y)
            assert_array_equal(clf.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_weighted_classification_toy():
    """Check classification on a toy dataset with sample weights."""
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        # uniform weights (whatever their scale) must not change predictions
        for scale in (1.0, 0.5):
            clf.fit(X, y, sample_weight=np.ones(len(X)) * scale)
            assert_array_equal(clf.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_regression_toy():
    # Check regression on a toy dataset.
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
        # BUGFIX: the feature-subsampled tree was previously fitted into a
        # separate `clf` variable while both assertions checked the first
        # model (`reg`), so this configuration was never actually tested.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    """Check classifiers achieve a perfect fit on a 10x10 XOR problem."""
    target = np.zeros((10, 10))
    target[:5, :5] = 1
    target[5:, 5:] = 1
    # turn the grid into (row, col) feature pairs with one label each
    gridx, gridy = np.indices(target.shape)
    features = np.vstack([gridx.ravel(), gridy.ravel()]).T
    labels = target.ravel()
    for name, Tree in CLF_TREES.items():
        for params in ({}, {"max_features": 1}):
            clf = Tree(random_state=0, **params)
            clf.fit(features, labels)
            assert_equal(clf.score(features, labels), 1.0,
                         "Failed with {0}".format(name))
def test_iris():
    # Check consistency on dataset iris.
    for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
        clf = Tree(criterion=criterion, random_state=0)
        clf.fit(iris.data, iris.target)
        score = accuracy_score(clf.predict(iris.data), iris.target)
        # full-depth tree: training accuracy should be high
        assert_greater(score, 0.9,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
        clf = Tree(criterion=criterion, max_features=2, random_state=0)
        clf.fit(iris.data, iris.target)
        score = accuracy_score(clf.predict(iris.data), iris.target)
        # feature subsampling weakens the fit, hence the looser bound
        assert_greater(score, 0.5,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))


def test_boston():
    # Check consistency on dataset boston house prices.
    for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
        reg = Tree(criterion=criterion, random_state=0)
        reg.fit(boston.data, boston.target)
        score = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_less(score, 1,
                    "Failed with {0}, criterion = {1} and score = {2}"
                    "".format(name, criterion, score))
        # using fewer features reduces the learning ability of this tree,
        # but reduces training time.
        reg = Tree(criterion=criterion, max_features=6, random_state=0)
        reg.fit(boston.data, boston.target)
        score = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_less(score, 2,
                    "Failed with {0}, criterion = {1} and score = {2}"
                    "".format(name, criterion, score))


def test_probability():
    # Predict probabilities using DecisionTreeClassifier.
    for name, Tree in CLF_TREES.items():
        clf = Tree(max_depth=1, max_features=1, random_state=42)
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        # probabilities of each row must sum to 1
        assert_array_almost_equal(np.sum(prob_predict, 1),
                                  np.ones(iris.data.shape[0]),
                                  err_msg="Failed with {0}".format(name))
        # argmax of the probabilities must agree with predict()
        assert_array_equal(np.argmax(prob_predict, 1),
                           clf.predict(iris.data),
                           err_msg="Failed with {0}".format(name))
        # predict_log_proba must be the log of predict_proba
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8,
                            err_msg="Failed with {0}".format(name))
def test_arrayrepr():
    """Smoke test: the internal array representation must resize
    without error on a long 1-feature dataset."""
    data = np.arange(10000)[:, np.newaxis]
    target = np.arange(10000)
    for name, Tree in REG_TREES.items():
        Tree(max_depth=None, random_state=0).fit(data, target)
def test_pure_set():
    # Check when y is pure.
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # BUGFIX: this previously asserted `clf.predict(X)` -- the last
        # classifier from the loop above -- so the regressors were never
        # actually checked.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
    # Check numerical stability.
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    # any floating point warning during fit is escalated to an error so
    # numerical issues fail the test loudly
    with np.errstate(all="raise"):
        for name, Tree in REG_TREES.items():
            reg = Tree(random_state=0)
            reg.fit(X, y)
            reg.fit(X, -y)
            reg.fit(-X, y)
            reg.fit(-X, -y)


def test_importances():
    # Check variable importances.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        importances = clf.feature_importances_
        # exactly the 3 informative features should stand out
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
        assert_equal(n_important, 3, "Failed with {0}".format(name))
        X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
        assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
    # Check on iris that importances are the same for all builders
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(iris.data, iris.target)
    clf2 = DecisionTreeClassifier(random_state=0,
                                  max_leaf_nodes=len(iris.data))
    clf2.fit(iris.data, iris.target)
    assert_array_equal(clf.feature_importances_,
                       clf2.feature_importances_)


@raises(ValueError)
def test_importances_raises():
    # Check if variable importance before fit raises ValueError.
    clf = DecisionTreeClassifier()
    clf.feature_importances_


def test_importances_gini_equal_mse():
    # Check that gini is equivalent to mse for binary output variable
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occurs at
    # high tree depth, we restrict this maximal depth.
    clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
                                 random_state=0).fit(X, y)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
                                random_state=0).fit(X, y)
    assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
    assert_array_equal(clf.tree_.feature, reg.tree_.feature)
    assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
    assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
    assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
    # Check max_features.
    # "auto" means all features for regressors ...
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(max_features="auto")
        reg.fit(boston.data, boston.target)
        assert_equal(reg.max_features_, boston.data.shape[1])
    # ... and sqrt(n_features) for classifiers (sqrt(4) == 2 on iris)
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(max_features="auto")
        clf.fit(iris.data, iris.target)
        assert_equal(clf.max_features_, 2)
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_features="sqrt")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.sqrt(iris.data.shape[1])))
        est = TreeEstimator(max_features="log2")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.log2(iris.data.shape[1])))
        est = TreeEstimator(max_features=1)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=3)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 3)
        # fractional values are a proportion of n_features (floored, min 1)
        est = TreeEstimator(max_features=0.01)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=0.5)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(0.5 * iris.data.shape[1]))
        est = TreeEstimator(max_features=1.0)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        est = TreeEstimator(max_features=None)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        # use values of max_features that are invalid
        est = TreeEstimator(max_features=10)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=-1)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=0.0)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=1.5)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features="foobar")
        assert_raises(ValueError, est.fit, X, y)


def test_error():
    # Test that it gives proper exception on deficient input.
    for name, TreeEstimator in CLF_TREES.items():
        # predict before fit
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict_proba, X)
        est.fit(X, y)
        X2 = [-2, -1, 1]  # wrong feature shape for sample
        assert_raises(ValueError, est.predict_proba, X2)
    for name, TreeEstimator in ALL_TREES.items():
        # Invalid values for parameters
        assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=-1).fit,
                      X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=0.51).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
        # Wrong dimensions
        est = TreeEstimator()
        y2 = y[:-1]
        assert_raises(ValueError, est.fit, X, y2)
        # Test with arrays that are non-contiguous.
        Xf = np.asfortranarray(X)
        est = TreeEstimator()
        est.fit(Xf, y)
        assert_almost_equal(est.predict(T), true_result)
        # predict before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict, T)
        # predict on vector with different dims
        est.fit(X, y)
        t = np.asarray(T)
        assert_raises(ValueError, est.predict, t[:, 1:])
        # wrong sample shape
        Xt = np.array(X).T
        est = TreeEstimator()
        est.fit(np.dot(X, Xt), y)
        assert_raises(ValueError, est.predict, X)
        assert_raises(ValueError, est.apply, X)
        clf = TreeEstimator()
        clf.fit(X, y)
        assert_raises(ValueError, clf.predict, Xt)
        assert_raises(ValueError, clf.apply, Xt)
        # apply before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test if leaves contain more than leaf_count training examples
    X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    y = iris.target
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for name, TreeEstimator in ALL_TREES.items():
            est = TreeEstimator(min_samples_leaf=5,
                                max_leaf_nodes=max_leaf_nodes,
                                random_state=0)
            est.fit(X, y)
            out = est.tree_.apply(X)
            node_counts = np.bincount(out)
            # drop inner nodes
            leaf_count = node_counts[node_counts != 0]
            assert_greater(np.min(leaf_count), 4,
                           "Failed with {0}".format(name))


def check_min_weight_fraction_leaf(name, datasets, sparse=False):
    """Test if leaves contain at least min_weight_fraction_leaf of the
    training set.

    NOTE(review): draws weights from the module-level ``rng``, so results
    depend on how many times this checker has run before -- confirm that
    is intended.
    """
    if sparse:
        X = DATASETS[datasets]["X_sparse"].astype(np.float32)
    else:
        X = DATASETS[datasets]["X"].astype(np.float32)
    y = DATASETS[datasets]["y"]
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    TreeEstimator = ALL_TREES[name]
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
        est = TreeEstimator(min_weight_fraction_leaf=frac,
                            max_leaf_nodes=max_leaf_nodes,
                            random_state=0)
        est.fit(X, y, sample_weight=weights)
        if sparse:
            out = est.tree_.apply(X.tocsr())
        else:
            out = est.tree_.apply(X)
        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))


def test_min_weight_fraction_leaf():
    # nose-style test generator: yields one check per estimator
    # Check on dense input
    for name in ALL_TREES:
        yield check_min_weight_fraction_leaf, name, "iris"
    # Check on sparse input
    for name in SPARSE_TREES:
        yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimator are pickable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test preceedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# n_samples set n_feature to ease construction of a simultaneous
# construction of a csr and csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
joshpeng/Network-Intrusions-Flask | app/views.py | 1 | 3102 | """
Contains main routes for the Prediction App
"""
from flask import render_template
from flask_wtf import Form
from wtforms import fields
from wtforms.validators import Required
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from . import app, target_names
from .database import *
from .performance import *
class PredictForm(Form):
"""Fields for Predict"""
target_choices = [(choice, choice) for choice in sorted(target_names)]
database_choices = [('train', 'train'), ('test', 'test')]
model_choices = [('Random Forest', 'Random Forest'), ('XGBoost', 'XGBoost')]
# database = fields.SelectField('Database:', choices=database_choices, validators=[Required()])
model = fields.SelectField('Model:', choices=model_choices, default='XGBoost', validators=[Required()])
quantity = fields.IntegerField('# of Test Cases:', default=10, validators=[Required()])
attack = fields.SelectField('Attack Type:', choices=target_choices, validators=[Required()])
submit = fields.SubmitField('Submit')
@app.route('/', methods=('GET', 'POST'))
def index():
"""Index page"""
return render_template('index.html')
@app.route('/code', methods=('GET', 'POST'))
def code():
return render_template('code.html')
@app.route('/predict', methods=('GET', 'POST'))
def predict():
"""Prediction Test Bed page"""
form = PredictForm()
prediction = None
test_query = None
perf = []
quantity = 10
if form.validate_on_submit():
# store the submitted values
submitted_data = form.data
# Retrieve values from form
# database = str(submitted_data['database'])
database = 'test'
model = str(submitted_data['model'])
quantity = int(submitted_data['quantity'])
attack = str(submitted_data['attack'])
# Create array from values
test_query = get_data(database, quantity, attack)
# Clean up data frame
cleaned_df = clean_data(test_query)
# Create test set
X_test = cleaned_df.ix[:,(cleaned_df.columns != 'attack_cat')]
y_test = cleaned_df['attack_cat']
estimator = joblib.load('models/xg_scikit_model.pkl') if model == 'XGBoost' else joblib.load('models/rf_model.pkl')
y_pred = estimator.predict(X_test)
perf.append(round(get_perf(y_test, y_pred, 'acc'), 4))
# Can only do these if binary classification
if (attack != 'all'):
perf.append(round(get_perf(y_test, y_pred, 'prec', attack), 4))
perf.append(round(get_perf(y_test, y_pred, 'rec', attack), 4))
perf.append(round(get_perf(y_test, y_pred, 'f1', attack), 4))
else:
perf.append('N/A')
perf.append('N/A')
perf.append('N/A')
tables = []
if test_query is not None:
tables.append(present_data(test_query, y_pred).head(100).to_html())
# generate_confusion_matrix()
return render_template('testbed.html', form=form, tables=tables, perf=perf, quantity=quantity) | mit |
jakereps/qiime2 | qiime2/metadata/tests/test_io.py | 1 | 42757 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
    """
    Tests that Metadata.load succeeds on well-formed files: the supported
    formats and ID headers, odd-but-valid content (non-standard characters,
    missing data, padding), and the #q2:types directive.
    """
    def setUp(self):
        self.temp_dir_obj = tempfile.TemporaryDirectory(
            prefix='qiime2-metadata-tests-temp-')
        self.temp_dir = self.temp_dir_obj.name
        # This Metadata object is compared against observed Metadata objects in
        # many of the tests, so just define it once here.
        self.simple_md = Metadata(
            pd.DataFrame({'col1': [1.0, 2.0, 3.0],
                          'col2': ['a', 'b', 'c'],
                          'col3': ['foo', 'bar', '42']},
                         index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        # Basic sanity check to make sure the columns are ordered and typed as
        # expected. It'd be unfortunate to compare observed results to expected
        # results that aren't representing what we think they are!
        obs_columns = [(name, props.type)
                       for name, props in self.simple_md.columns.items()]
        exp_columns = [('col1', 'numeric'), ('col2', 'categorical'),
                       ('col3', 'categorical')]
        self.assertEqual(obs_columns, exp_columns)
    def tearDown(self):
        self.temp_dir_obj.cleanup()
    def test_simple(self):
        # Simple metadata file without comments, empty rows, jaggedness,
        # missing data, odd IDs or column names, directives, etc. The file has
        # multiple column types (numeric, categorical, and something that has
        # mixed numbers and strings, which must be interpreted as categorical).
        fp = get_data_path('valid/simple.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_bom_simple_txt(self):
        # This is the encoding that notepad.exe will use most commonly
        fp = get_data_path('valid/BOM-simple.txt')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_different_file_extension(self):
        fp = get_data_path('valid/simple.txt')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_no_newline_at_eof(self):
        fp = get_data_path('valid/no-newline-at-eof.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_unix_line_endings(self):
        fp = get_data_path('valid/unix-line-endings.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_windows_line_endings(self):
        fp = get_data_path('valid/windows-line-endings.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_mac_line_endings(self):
        fp = get_data_path('valid/mac-line-endings.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_no_source_artifacts(self):
        # A freshly loaded file is not associated with any QIIME 2 artifacts.
        fp = get_data_path('valid/simple.tsv')
        metadata = Metadata.load(fp)
        self.assertEqual(metadata.artifacts, ())
    def test_retains_column_order(self):
        # Explicitly test that the file's column order is retained in the
        # Metadata object. Many of the test cases use files with column names
        # in alphabetical order (e.g. "col1", "col2", "col3"), which matches
        # how pandas orders columns in a DataFrame when supplied with a dict
        # (many of the test cases use this feature of the DataFrame
        # constructor when constructing the expected DataFrame).
        fp = get_data_path('valid/column-order.tsv')
        obs_md = Metadata.load(fp)
        # Supply DataFrame constructor with explicit column ordering instead of
        # a dict.
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_columns = ['z', 'y', 'x']
        exp_data = [
            [1.0, 'a', 'foo'],
            [2.0, 'b', 'bar'],
            [3.0, 'c', '42']
        ]
        exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_leading_trailing_whitespace(self):
        fp = get_data_path('valid/leading-trailing-whitespace.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_comments(self):
        fp = get_data_path('valid/comments.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_empty_rows(self):
        fp = get_data_path('valid/empty-rows.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_qiime1_mapping_file(self):
        # QIIME 1 mapping files use the '#SampleID' header.
        fp = get_data_path('valid/qiime1.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID')
        exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0],
                               'col2': ['a', 'b', 'c'],
                               'col3': ['foo', 'bar', '42']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_qiita_sample_information_file(self):
        # Qiita sample information files use the 'sample_name' header.
        fp = get_data_path('valid/qiita-sample-information.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
        exp_df = pd.DataFrame({
            'DESCRIPTION': ['description 1', 'description 2'],
            'TITLE': ['A Title', 'Another Title']},
            index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_qiita_preparation_information_file(self):
        fp = get_data_path('valid/qiita-preparation-information.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
        exp_df = pd.DataFrame({
            'BARCODE': ['ACGT', 'TGCA'],
            'EXPERIMENT_DESIGN_DESCRIPTION': ['longitudinal study',
                                              'longitudinal study']},
            index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_biom_observation_metadata_file(self):
        # BIOM observation metadata files use the '#OTUID' header.
        fp = get_data_path('valid/biom-observation-metadata.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['OTU_1', 'OTU_2'], name='#OTUID')
        exp_df = pd.DataFrame([['k__Bacteria;p__Firmicutes', 0.890],
                               ['k__Bacteria', 0.9999]],
                              columns=['taxonomy', 'confidence'],
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_supported_id_headers(self):
        case_insensitive = {
            'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
            'feature id', 'feature-id'
        }
        exact_match = {
            '#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
        }
        # Build a set of supported headers, including exact matches and headers
        # with different casing.
        headers = set()
        for header in case_insensitive:
            headers.add(header)
            headers.add(header.upper())
            headers.add(header.title())
        for header in exact_match:
            headers.add(header)
        fp = os.path.join(self.temp_dir, 'metadata.tsv')
        count = 0
        for header in headers:
            with open(fp, 'w') as fh:
                fh.write('%s\tcolumn\nid1\tfoo\nid2\tbar\n' % header)
            obs_md = Metadata.load(fp)
            exp_index = pd.Index(['id1', 'id2'], name=header)
            exp_df = pd.DataFrame({'column': ['foo', 'bar']}, index=exp_index)
            exp_md = Metadata(exp_df)
            self.assertEqual(obs_md, exp_md)
            count += 1
        # Since this test case is a little complicated, make sure that the
        # expected number of comparisons are happening.
        # 7 case-insensitive headers x 3 casings (21 distinct) + 5 exact = 26.
        self.assertEqual(count, 26)
    def test_recommended_ids(self):
        fp = get_data_path('valid/recommended-ids.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
                             name='id')
        exp_df = pd.DataFrame({'col1': ['foo', 'bar']}, index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_non_standard_characters(self):
        # Test that non-standard characters in IDs, column names, and cells are
        # handled correctly. The test case isn't exhaustive (e.g. it doesn't
        # test every Unicode character; that would be a nice additional test
        # case to have in the future). Instead, this test aims to be more of an
        # integration test for the robustness of the reader to non-standard
        # data. Many of the characters and their placement within the data file
        # are based on use-cases/bugs reported on the forum, Slack, etc. The
        # data file has comments explaining these test case choices in more
        # detail.
        fp = get_data_path('valid/non-standard-characters.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
                              'i d\r\t\n5'], name='id')
        exp_columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"',
                       'col\t \r\n5']
        exp_data = [
            ['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
            ["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
            ['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
            ['b__a_z', '<42>', '>42', np.nan, np.nan],
            # NOTE(review): this row has 3 cells while the others have 5 —
            # verify it matches the data file / expected DataFrame shape.
            ['baz', np.nan, '42']
        ]
        exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_missing_data(self):
        fp = get_data_path('valid/missing-data.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['None', 'nan', 'NA'], name='id')
        exp_df = pd.DataFrame(collections.OrderedDict([
            ('col1', [1.0, np.nan, np.nan]),
            ('NA', [np.nan, np.nan, np.nan]),
            ('col3', ['null', 'N/A', 'NA']),
            ('col4', np.array([np.nan, np.nan, np.nan], dtype=object))]),
            index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
        # Test that column types are correct (mainly for the two empty columns;
        # one should be numeric, the other categorical).
        obs_columns = [(name, props.type)
                       for name, props in obs_md.columns.items()]
        exp_columns = [('col1', 'numeric'), ('NA', 'numeric'),
                       ('col3', 'categorical'), ('col4', 'categorical')]
        self.assertEqual(obs_columns, exp_columns)
    def test_minimal_file(self):
        # Simplest possible metadata file consists of one ID and zero columns.
        fp = get_data_path('valid/minimal.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['a'], name='id')
        exp_df = pd.DataFrame({}, index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_single_id(self):
        fp = get_data_path('valid/single-id.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1'], name='id')
        exp_df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_no_columns(self):
        fp = get_data_path('valid/no-columns.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['a', 'b', 'my-id'], name='id')
        exp_df = pd.DataFrame({}, index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_single_column(self):
        fp = get_data_path('valid/single-column.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0]}, index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_trailing_columns(self):
        fp = get_data_path('valid/trailing-columns.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_jagged_trailing_columns(self):
        # Test case based on https://github.com/qiime2/qiime2/issues/335
        fp = get_data_path('valid/jagged-trailing-columns.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_padding_rows_shorter_than_header(self):
        # Rows shorter than the header are padded with missing values.
        fp = get_data_path('valid/rows-shorter-than-header.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': [1.0, 2.0, np.nan],
                               'col2': ['a', np.nan, np.nan],
                               'col3': [np.nan, np.nan, np.nan]},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_all_cells_padded(self):
        fp = get_data_path('valid/all-cells-padded.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
                               'col2': [np.nan, np.nan, np.nan],
                               'col3': [np.nan, np.nan, np.nan]},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_does_not_cast_ids_or_column_names(self):
        # IDs and column names stay as strings even when they look numeric.
        fp = get_data_path('valid/no-id-or-column-name-type-cast.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['0.000001', '0.004000', '0.000000'],
                             dtype=object, name='id')
        exp_columns = ['42.0', '1000', '-4.2']
        exp_data = [
            [2.0, 'b', 2.5],
            [1.0, 'b', 4.2],
            [3.0, 'c', -9.999]
        ]
        exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_numeric_column(self):
        fp = get_data_path('valid/numeric-column.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
                              'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
        exp_df = pd.DataFrame({'col1': [0.0, 2.0, 0.0003, -4.2, 1e-4, 1e4,
                                        1.5e2, np.nan, 1.0, 0.5, 1e-8, -0.0]},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_numeric_column_as_categorical(self):
        # The same file as test_numeric_column, but the caller forces the
        # column to categorical, so the raw strings are preserved.
        fp = get_data_path('valid/numeric-column.tsv')
        obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
        exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
                              'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
        exp_df = pd.DataFrame({'col1': ['0', '2.0', '0.00030', '-4.2', '1e-4',
                                        '1e4', '+1.5E+2', np.nan, '1.', '.5',
                                        '1e-08', '-0']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_with_complete_types_directive(self):
        fp = get_data_path('valid/complete-types-directive.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
                               'col2': ['a', 'b', 'c'],
                               'col3': ['foo', 'bar', '42']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_with_partial_types_directive(self):
        fp = get_data_path('valid/partial-types-directive.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
                               'col2': ['a', 'b', 'c'],
                               'col3': ['foo', 'bar', '42']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_with_empty_types_directive(self):
        fp = get_data_path('valid/empty-types-directive.tsv')
        obs_md = Metadata.load(fp)
        self.assertEqual(obs_md, self.simple_md)
    def test_with_case_insensitive_types_directive(self):
        fp = get_data_path('valid/case-insensitive-types-directive.tsv')
        obs_md = Metadata.load(fp)
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
                               'col2': ['a', 'b', 'c'],
                               'col3': [-5.0, 0.0, 42.0]},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_column_types_without_directive(self):
        # column_types passed by the caller applies even without a directive.
        fp = get_data_path('valid/simple.tsv')
        obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
                               'col2': ['a', 'b', 'c'],
                               'col3': ['foo', 'bar', '42']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
    def test_column_types_override_directive(self):
        # column_types passed by the caller wins over the file's directive.
        fp = get_data_path('valid/simple-with-directive.tsv')
        obs_md = Metadata.load(fp, column_types={'col1': 'categorical',
                                                 'col2': 'categorical'})
        exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
        exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
                               'col2': ['a', 'b', 'c'],
                               'col3': ['foo', 'bar', '42']},
                              index=exp_index)
        exp_md = Metadata(exp_df)
        self.assertEqual(obs_md, exp_md)
class TestSave(unittest.TestCase):
    """
    Tests that Metadata.save (and MetadataColumn.save) writes the expected
    TSV output, including the #q2:types directive row, missing-data cells,
    and number formatting.
    """
    def setUp(self):
        self.temp_dir_obj = tempfile.TemporaryDirectory(
            prefix='qiime2-metadata-tests-temp-')
        self.temp_dir = self.temp_dir_obj.name
        self.filepath = os.path.join(self.temp_dir, 'metadata.tsv')
    def tearDown(self):
        self.temp_dir_obj.cleanup()
    def test_simple(self):
        md = Metadata(pd.DataFrame(
            {'col1': [1.0, 2.0, 3.0],
             'col2': ['a', 'b', 'c'],
             'col3': ['foo', 'bar', '42']},
            index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tcol1\tcol2\tcol3\n"
            "#q2:types\tnumeric\tcategorical\tcategorical\n"
            "id1\t1\ta\tfoo\n"
            "id2\t2\tb\tbar\n"
            "id3\t3\tc\t42\n"
        )
        self.assertEqual(obs, exp)
    def test_no_bom(self):
        # The writer must not emit a UTF-8 byte-order mark.
        md = Metadata(pd.DataFrame(
            {'col1': [1.0, 2.0, 3.0],
             'col2': ['a', 'b', 'c'],
             'col3': ['foo', 'bar', '42']},
            index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'rb') as fh:
            obs = fh.read(2)
        self.assertEqual(obs, b'id')
    def test_different_file_extension(self):
        md = Metadata(pd.DataFrame(
            {'col1': [1.0, 2.0, 3.0],
             'col2': ['a', 'b', 'c'],
             'col3': ['foo', 'bar', '42']},
            index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        filepath = os.path.join(self.temp_dir, 'metadata.txt')
        md.save(filepath)
        with open(filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tcol1\tcol2\tcol3\n"
            "#q2:types\tnumeric\tcategorical\tcategorical\n"
            "id1\t1\ta\tfoo\n"
            "id2\t2\tb\tbar\n"
            "id3\t3\tc\t42\n"
        )
        self.assertEqual(obs, exp)
    def test_some_missing_data(self):
        # Missing values are written as empty cells.
        md = Metadata(
            pd.DataFrame({'col1': [42.0, np.nan, -3.5],
                          'col2': ['a', np.nan, np.nan]},
                         index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tcol1\tcol2\n"
            "#q2:types\tnumeric\tcategorical\n"
            "id1\t42\ta\n"
            "id2\t\t\n"
            "id3\t-3.5\t\n"
        )
        self.assertEqual(obs, exp)
    def test_all_missing_data(self):
        # nan-only columns that are numeric or categorical.
        md = Metadata(
            pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
                          'col2': np.array([np.nan, np.nan, np.nan],
                                           dtype=object)},
                         index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tcol1\tcol2\n"
            "#q2:types\tnumeric\tcategorical\n"
            "id1\t\t\n"
            "id2\t\t\n"
            "id3\t\t\n"
        )
        self.assertEqual(obs, exp)
    def test_unsorted_column_order(self):
        # Column order from the DataFrame is preserved on save.
        index = pd.Index(['id1', 'id2', 'id3'], name='id')
        columns = ['z', 'b', 'y']
        data = [
            [1.0, 'a', 'foo'],
            [2.0, 'b', 'bar'],
            [3.0, 'c', '42']
        ]
        md = Metadata(pd.DataFrame(data, index=index, columns=columns))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tz\tb\ty\n"
            "#q2:types\tnumeric\tcategorical\tcategorical\n"
            "id1\t1\ta\tfoo\n"
            "id2\t2\tb\tbar\n"
            "id3\t3\tc\t42\n"
        )
        self.assertEqual(obs, exp)
    def test_alternate_id_header(self):
        # The index name (here '#SampleID') is used as the ID header.
        md = Metadata(pd.DataFrame(
            {'col1': [1.0, 2.0, 3.0],
             'col2': ['a', 'b', 'c'],
             'col3': ['foo', 'bar', '42']},
            index=pd.Index(['id1', 'id2', 'id3'], name='#SampleID')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "#SampleID\tcol1\tcol2\tcol3\n"
            "#q2:types\tnumeric\tcategorical\tcategorical\n"
            "id1\t1\ta\tfoo\n"
            "id2\t2\tb\tbar\n"
            "id3\t3\tc\t42\n"
        )
        self.assertEqual(obs, exp)
    def test_various_numbers(self):
        numbers = [
            0.0, -0.0, np.nan, 1.0, 42.0, -33.0, 1e-10, 1.5e15, 0.0003, -4.234,
            # This last number should be rounded because it exceeds 15 digits
            # of precision.
            12.34567891234567
        ]
        index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
                          'id8', 'id9', 'id10', 'id11'], name='ID')
        md = Metadata(pd.DataFrame({'numbers': numbers}, index=index))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "ID\tnumbers\n"
            "#q2:types\tnumeric\n"
            "id1\t0\n"
            "id2\t-0\n"
            "id3\t\n"
            "id4\t1\n"
            "id5\t42\n"
            "id6\t-33\n"
            "id7\t1e-10\n"
            "id8\t1.5e+15\n"
            "id9\t0.0003\n"
            "id10\t-4.234\n"
            "id11\t12.3456789123457\n"
        )
        self.assertEqual(obs, exp)
    def test_minimal(self):
        md = Metadata(pd.DataFrame({}, index=pd.Index(['my-id'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\n"
            "#q2:types\n"
            "my-id\n"
        )
        self.assertEqual(obs, exp)
    def test_single_id(self):
        md = Metadata(pd.DataFrame(
            {'col1': ['foo'], 'col2': [4.002]},
            index=pd.Index(['my-id'], name='featureid')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "featureid\tcol1\tcol2\n"
            "#q2:types\tcategorical\tnumeric\n"
            "my-id\tfoo\t4.002\n"
        )
        self.assertEqual(obs, exp)
    def test_no_columns(self):
        md = Metadata(pd.DataFrame(
            {}, index=pd.Index(['foo', 'bar', 'baz'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\n"
            "#q2:types\n"
            "foo\n"
            "bar\n"
            "baz\n"
        )
        self.assertEqual(obs, exp)
    def test_single_column(self):
        md = Metadata(pd.DataFrame(
            {'col1': ['42', '4.3', '4.4000']},
            index=pd.Index(['foo', 'bar', 'baz'], name='id')))
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tcol1\n"
            "#q2:types\tcategorical\n"
            "foo\t42\n"
            "bar\t4.3\n"
            "baz\t4.4000\n"
        )
        self.assertEqual(obs, exp)
    def test_ids_and_column_names_as_numeric_strings(self):
        # Numeric-looking IDs/column names are written verbatim as strings.
        index = pd.Index(['0.000001', '0.004000', '0.000000'],
                         dtype=object, name='id')
        columns = ['42.0', '1000', '-4.2']
        data = [
            [2.0, 'b', 2.5],
            [1.0, 'b', 4.2],
            [3.0, 'c', -9.999]
        ]
        df = pd.DataFrame(data, index=index, columns=columns)
        md = Metadata(df)
        md.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\t42.0\t1000\t-4.2\n"
            "#q2:types\tnumeric\tcategorical\tnumeric\n"
            "0.000001\t2\tb\t2.5\n"
            "0.004000\t1\tb\t4.2\n"
            "0.000000\t3\tc\t-9.999\n"
        )
        self.assertEqual(obs, exp)
    # A couple of basic tests for CategoricalMetadataColumn and
    # NumericMetadataColumn below. Those classes simply transform themselves
    # into single-column Metadata objects within `MetadataColumn.save()` and
    # use the same writer code from there on.
    def test_categorical_metadata_column(self):
        mdc = CategoricalMetadataColumn(pd.Series(
            ['foo', 'bar', '42.50'], name='categorical-column',
            index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        mdc.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "id\tcategorical-column\n"
            "#q2:types\tcategorical\n"
            "id1\tfoo\n"
            "id2\tbar\n"
            "id3\t42.50\n"
        )
        self.assertEqual(obs, exp)
    def test_numeric_metadata_column(self):
        mdc = NumericMetadataColumn(pd.Series(
            [1e-15, 42.50, -999.0], name='numeric-column',
            index=pd.Index(['id1', 'id2', 'id3'], name='#OTU ID')))
        mdc.save(self.filepath)
        with open(self.filepath, 'r') as fh:
            obs = fh.read()
        exp = (
            "#OTU ID\tnumeric-column\n"
            "#q2:types\tnumeric\n"
            "id1\t1e-15\n"
            "id2\t42.5\n"
            "id3\t-999\n"
        )
        self.assertEqual(obs, exp)
# TODO: this class spot-checks some of the more "difficult" valid files to
# make sure they can be read, written to disk, and read again losslessly.
# A more complete strategy (with fewer test cases) would be to perform a
# roundtrip test on every valid file under the `data` directory (e.g. with a
# `glob` and a for loop).
class TestRoundtrip(unittest.TestCase):
    """
    Roundtrip tests: load a valid file, save it, reload it, and verify the
    reloaded metadata is identical to the original.
    """
    def setUp(self):
        self.temp_dir_obj = tempfile.TemporaryDirectory(
            prefix='qiime2-metadata-tests-temp-')
        self.temp_dir = self.temp_dir_obj.name
        self.filepath = os.path.join(self.temp_dir, 'metadata.tsv')
    def tearDown(self):
        self.temp_dir_obj.cleanup()
    def _assert_roundtrip(self, relpath):
        # Load the original file, save it, reload the saved copy, and check
        # that nothing was lost or altered in the process.
        md1 = Metadata.load(get_data_path(relpath))
        md1.save(self.filepath)
        md2 = Metadata.load(self.filepath)
        self.assertEqual(md1, md2)
    def test_simple(self):
        self._assert_roundtrip('valid/simple.tsv')
    def test_non_standard_characters(self):
        self._assert_roundtrip('valid/non-standard-characters.tsv')
    def test_missing_data(self):
        self._assert_roundtrip('valid/missing-data.tsv')
    def test_minimal_file(self):
        self._assert_roundtrip('valid/minimal.tsv')
    def test_numeric_column(self):
        self._assert_roundtrip('valid/numeric-column.tsv')
    def test_all_cells_padded(self):
        self._assert_roundtrip('valid/all-cells-padded.tsv')
    def test_categorical_metadata_column(self):
        fp = get_data_path('valid/simple.tsv')
        md1 = Metadata.load(fp)
        mdc1 = md1.get_column('col2')
        self.assertIsInstance(mdc1, CategoricalMetadataColumn)
        mdc1.save(self.filepath)
        md2 = Metadata.load(self.filepath)
        mdc2 = md2.get_column('col2')
        # Bug fix: the original asserted the type of mdc1 twice; it is the
        # reloaded column (mdc2) whose type must be verified here.
        self.assertIsInstance(mdc2, CategoricalMetadataColumn)
        self.assertEqual(mdc1, mdc2)
    def test_numeric_metadata_column(self):
        fp = get_data_path('valid/simple.tsv')
        md1 = Metadata.load(fp)
        mdc1 = md1.get_column('col1')
        self.assertIsInstance(mdc1, NumericMetadataColumn)
        mdc1.save(self.filepath)
        md2 = Metadata.load(self.filepath)
        mdc2 = md2.get_column('col1')
        # Bug fix: check the reloaded column's type, not mdc1's again.
        self.assertIsInstance(mdc2, NumericMetadataColumn)
        self.assertEqual(mdc1, mdc2)
# Allow running this test module directly (e.g. `python this_file.py`).
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
gem/oq-engine | openquake/calculators/base.py | 1 | 55094 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import abc
import pdb
import logging
import operator
import traceback
from datetime import datetime
from shapely import wkt
import h5py
import numpy
import pandas
from openquake.baselib import (
general, hdf5, __version__ as engine_version)
from openquake.baselib import parallel, python3compat
from openquake.baselib.performance import Monitor
from openquake.hazardlib import InvalidFile, site, stats
from openquake.hazardlib.site_amplification import Amplifier
from openquake.hazardlib.site_amplification import AmplFunction
from openquake.hazardlib.calc.filters import SourceFilter, getdefault
from openquake.hazardlib.source import rupture
from openquake.hazardlib.shakemap.maps import get_sitecol_shakemap
from openquake.hazardlib.shakemap.gmfs import to_gmfs
from openquake.risklib import riskinput, riskmodels
from openquake.commonlib import readinput, logictree, datastore
from openquake.calculators.export import export as exp
from openquake.calculators import getters
# attribute extractors used as sort/group keys throughout the calculators
get_taxonomy = operator.attrgetter('taxonomy')
get_weight = operator.attrgetter('weight')
get_imt = operator.attrgetter('imt')
# registry of calculator classes, keyed on oqparam.calculation_mode
calculators = general.CallableDict(operator.attrgetter('calculation_mode'))
# short aliases for the numpy scalar types used in this module
U8 = numpy.uint8
U16 = numpy.uint16
U32 = numpy.uint32
F32 = numpy.float32
TWO16 = 2 ** 16
TWO32 = 2 ** 32
# record dtype returned by get_stats below
stats_dt = numpy.dtype([('mean', F32), ('std', F32),
                        ('min', F32), ('max', F32), ('len', U16)])
# this is used for the minimum_intensity dictionaries
def consistent(dic1, dic2):
    """
    Check if two dictionaries with default are consistent:

    >>> consistent({'PGA': 0.05, 'SA(0.3)': 0.05}, {'default': 0.05})
    True
    >>> consistent({'SA(0.3)': 0.1, 'SA(0.6)': 0.05},
    ...            {'default': 0.1, 'SA(0.3)': 0.1, 'SA(0.6)': 0.05})
    True
    """
    if dic1 == dic2:
        return True
    values1 = set(dic1.values())
    values2 = set(dic2.values())
    if len(values1) == 1 and len(values2) == 1 and values1 == values2:
        # e.g. {'PGA': 0.05, 'SA(0.3)': 0.05} versus {'default': 0.05}
        return True
    # otherwise dic1 must cover every key of dic2 except 'default'
    return not (set(dic2) - set(dic1) - {'default'})
def get_stats(seq):
    """
    :param seq: a non-empty sequence of numbers
    :returns: a record of dtype `stats_dt` with mean, std, min, max, len
    """
    n = len(seq)
    # the sample standard deviation (ddof=1) is undefined for one value
    sample_std = numpy.nan if n == 1 else numpy.std(seq, ddof=1)
    record = (numpy.mean(seq), sample_std, numpy.min(seq), numpy.max(seq), n)
    return numpy.array(record, stats_dt)
class InvalidCalculationID(Exception):
    """
    Raised when a post-calculation is run on top of a pre-calculation
    of an incompatible kind
    """
def fix_ones(pmap):
    """
    Physically, an extremely small intensity measure level can have an
    extremely large probability of exceedence, however that probability
    cannot be exactly 1 unless the level is exactly 0. Numerically, the
    PoE can be 1 and this give issues when calculating the damage (there
    is a log(0) in
    :class:`openquake.risklib.scientific.annual_frequency_of_exceedence`).
    Here we solve the issue by replacing the unphysical probabilities 1
    with .9999999999999999 (the float64 closest to 1).

    :param pmap: a mapping sid -> object with an `.array` attribute
    :returns: the same pmap, modified in place
    """
    almost_one = .9999999999999999
    for sid in pmap:
        curve = pmap[sid].array
        curve[curve == 1.] = almost_one
    return pmap
def build_weights(realizations):
    """
    :returns: an array of shape R with the 'default' weight of each realization
    """
    weights = [realization.weight['default'] for realization in realizations]
    return numpy.array(weights)
def set_array(longarray, shortarray):
    """
    Copy `shortarray` into the leftmost positions of `longarray` and pad
    any remaining positions on the right with `numpy.nan`.

    :param longarray: a numpy array of floats of length L >= l
    :param shortarray: a numpy array of floats of length l
    """
    n = len(shortarray)
    longarray[:n] = shortarray
    longarray[n:] = numpy.nan
class BaseCalculator(metaclass=abc.ABCMeta):
"""
Abstract base class for all calculators.
:param oqparam: OqParam object
:param monitor: monitor object
:param calc_id: numeric calculation ID
"""
precalc = None
accept_precalc = []
from_engine = False # set by engine.run_calc
is_stochastic = False # True for scenario and event based calculators
    def __init__(self, oqparam, calc_id):
        # open/create the datastore associated to the given calculation ID
        self.datastore = datastore.new(calc_id, oqparam)
        # monitor for the whole .run(), named after the concrete subclass
        self._monitor = Monitor(
            '%s.run' % self.__class__.__name__, measuremem=True,
            h5=self.datastore)
        # NB: using h5=self.datastore.hdf5 would mean losing the performance
        # info about Calculator.run since the file will be closed later on
        self.oqparam = oqparam
def pre_checks(self):
"""
Checks to run after the pre_execute but before the execute
"""
def monitor(self, operation='', **kw):
"""
:returns: a new Monitor instance
"""
mon = self._monitor(operation, h5=self.datastore.hdf5)
self._monitor.calc_id = mon.calc_id = self.datastore.calc_id
vars(mon).update(kw)
return mon
    def save_params(self, **kw):
        """
        Update the current calculation parameters and save engine_version
        """
        # a None hazard_calculation_id must not override a valid value
        # already present in oqparam
        if ('hazard_calculation_id' in kw and
                kw['hazard_calculation_id'] is None):
            del kw['hazard_calculation_id']
        vars(self.oqparam).update(**kw)
        self.datastore['oqparam'] = self.oqparam
        # record provenance attributes on the datastore root group
        attrs = self.datastore['/'].attrs
        attrs['engine_version'] = engine_version
        attrs['date'] = datetime.now().isoformat()[:19]
        if 'checksum32' not in attrs:
            # the checksum of the input files is computed only once
            attrs['checksum32'] = readinput.get_checksum32(
                self.oqparam, self.datastore.hdf5)
            logging.info('Checksum of the input files: %(checksum32)s', attrs)
        self.datastore.flush()
def check_precalc(self, precalc_mode):
"""
Defensive programming against users providing an incorrect
pre-calculation ID (with ``--hazard-calculation-id``).
:param precalc_mode:
calculation_mode of the previous calculation
"""
calc_mode = self.oqparam.calculation_mode
ok_mode = self.accept_precalc
if calc_mode != precalc_mode and precalc_mode not in ok_mode:
raise InvalidCalculationID(
'In order to run a calculation of kind %r, '
'you need to provide a calculation of kind %r, '
'but you provided a %r instead' %
(calc_mode, ok_mode, precalc_mode))
    def run(self, pre_execute=True, concurrent_tasks=None, remove=True,
            shutdown=False, **kw):
        """
        Run the calculation and return the exported outputs.

        :param pre_execute: set it to False to avoid running pre_execute
        :param concurrent_tasks: set it to 0 to disable parallelization
        :param remove: set it to False to keep the temporary hdf5 file (if any)
        :param shutdown: set it to True to shutdown the ProcessPool
        :returns: the dictionary of exported outputs (ekey -> filenames)
        """
        with self._monitor:
            self._monitor.username = kw.get('username', '')
            if concurrent_tasks is None:  # use the job.ini parameter
                ct = self.oqparam.concurrent_tasks
            else:  # used the parameter passed in the command-line
                ct = concurrent_tasks
            if ct == 0:  # disable distribution temporarily
                oq_distribute = os.environ.get('OQ_DISTRIBUTE')
                os.environ['OQ_DISTRIBUTE'] = 'no'
            if ct != self.oqparam.concurrent_tasks:
                # save the used concurrent_tasks
                self.oqparam.concurrent_tasks = ct
            self.save_params(**kw)
            try:
                # the standard pipeline: pre_execute -> execute ->
                # post_execute -> export
                if pre_execute:
                    self.pre_execute()
                self.result = self.execute()
                if self.result is not None:
                    self.post_execute(self.result)
                self.export(kw.get('exports', ''))
            except Exception:
                if kw.get('pdb'):  # post-mortem debug
                    tb = sys.exc_info()[2]
                    traceback.print_tb(tb)
                    pdb.post_mortem(tb)
                else:
                    logging.critical('', exc_info=True)
                raise
            finally:
                if shutdown:
                    parallel.Starmap.shutdown()
                # cleanup globals
                if ct == 0:  # restore OQ_DISTRIBUTE
                    if oq_distribute is None:  # was not set
                        del os.environ['OQ_DISTRIBUTE']
                    else:
                        os.environ['OQ_DISTRIBUTE'] = oq_distribute
                # reset the module-level caches in readinput, so that a
                # subsequent calculation starts from a clean state
                readinput.pmap = None
                readinput.exposure = None
                readinput.gmfs = None
                readinput.eids = None
                readinput.smlt_cache.clear()
                readinput.gsim_lt_cache.clear()
                # remove temporary hdf5 file, if any
                if os.path.exists(self.datastore.tempname) and remove:
                    os.remove(self.datastore.tempname)
        return getattr(self, 'exported', {})
def core_task(*args):
"""
Core routine running on the workers.
"""
raise NotImplementedError
    @abc.abstractmethod
    def pre_execute(self):
        """
        Initialization phase: subclasses read the inputs and prepare
        the data structures needed by `execute`.
        """
    @abc.abstractmethod
    def execute(self):
        """
        Execution phase. Usually will run in parallel the core
        function and return a dictionary with the results.
        """
    @abc.abstractmethod
    def post_execute(self, result):
        """
        Post-processing phase of the aggregated output. It must be
        overridden with the export code. It will return a dictionary
        of output files.

        :param result: the return value of `execute`
        """
def gzip_inputs(self):
"""
Gzipping the inputs and saving them in the datastore
"""
logging.info('gzipping the input files')
fnames = readinput.get_input_files(self.oqparam)
self.datastore.store_files(fnames)
    def export(self, exports=None):
        """
        Export all the outputs in the datastore in the given export formats.
        Individual outputs are not exported if there are multiple realizations.

        :param exports: a comma-separated string (or tuple) of export
            formats; when falsy, the formats in `oqparam.exports` are used
        """
        self.exported = getattr(self, 'exported', {})
        # determine the export formats: the explicit argument wins over
        # the job.ini parameter
        if isinstance(exports, tuple):
            fmts = exports
        elif exports:  # is a string
            fmts = exports.split(',')
        elif isinstance(self.oqparam.exports, tuple):
            fmts = self.oqparam.exports
        else:  # is a string
            fmts = self.oqparam.exports.split(',')
        keys = set(self.datastore) | {'fullreport'}
        has_hcurves = ('hcurves-stats' in self.datastore or
                       'hcurves-rlzs' in self.datastore)
        if has_hcurves:
            keys.add('hcurves')
        for fmt in fmts:
            if not fmt:
                continue
            for key in sorted(keys):  # top level keys
                if 'rlzs' in key and self.R > 1:
                    # when statistics are present, skip the per-realization
                    # outputs (they can be huge and are rarely wanted)
                    if (key[:-4] + 'stats') in self.datastore:
                        continue  # skip individual curves
                self._export((key, fmt))
            # hmaps/uhs are derived from the hazard curves on the fly
            if has_hcurves and self.oqparam.hazard_maps:
                self._export(('hmaps', fmt))
            if has_hcurves and self.oqparam.uniform_hazard_spectra:
                self._export(('uhs', fmt))
    def _export(self, ekey):
        """
        Export a single (key, format) pair, skipping it if unknown or
        already exported; export errors are logged, not raised.

        :param ekey: an export key, i.e. a pair (datastore key, format)
        """
        if ekey not in exp or self.exported.get(ekey):  # already exported
            return
        with self.monitor('export'):
            try:
                self.exported[ekey] = fnames = exp(ekey, self.datastore)
            except Exception as exc:
                # a failed export must not kill the calculation
                fnames = []
                logging.error('Could not export %s: %s', ekey, exc)
            if fnames:
                logging.info('exported %s: %s', ekey[0], fnames)
def __repr__(self):
return '<%s#%d>' % (self.__class__.__name__, self.datastore.calc_id)
def check_time_event(oqparam, occupancy_periods):
    """
    Ensure that `oqparam.time_event`, when set, is one of the occupancy
    periods found in the exposure; otherwise raise a ValueError.

    :param oqparam: an OqParam instance
    :param occupancy_periods: the periods found in the exposure
    """
    event = oqparam.time_event
    if not event or event in occupancy_periods:
        return
    raise ValueError(
        'time_event is %s in %s, but the exposure contains %s' %
        (event, oqparam.inputs['job_ini'], ', '.join(occupancy_periods)))
def check_amplification(ampl_df, sitecol):
    """
    Make sure every amplification code referenced by the site collection
    exists in the amplification table.

    :param ampl_df: the amplification table as a pandas DataFrame
    :param sitecol: the site collection
    """
    known = set(ampl_df.index)
    if len(known) == 1:
        # a single amplification function applies to every site, so the
        # sitecol does not need an ampcode field at all
        return
    missing = set(sitecol.ampcode) - known
    if missing:
        names = b' '.join(missing).decode('utf8')
        raise ValueError('The site collection contains references to missing '
                         'amplification functions: %s' % names)
class HazardCalculator(BaseCalculator):
    """
    Base class for hazard calculators based on source models.
    It manages the site collection, the exposure (if any), the risk
    model (if any) and the logic tree realizations.
    """
    def src_filter(self):
        """
        :returns: a SourceFilter based on the complete site collection
            (or on no sites, for the ruptures-only calculator)
        """
        oq = self.oqparam
        if getattr(self, 'sitecol', None):
            sitecol = self.sitecol.complete
        else:  # can happen to the ruptures-only calculator
            sitecol = None
        return SourceFilter(sitecol, oq.maximum_distance)

    @property
    def E(self):
        """
        :returns: the number of stored events (0 if no 'events' dataset)
        """
        try:
            return len(self.datastore['events'])
        except KeyError:
            return 0

    @property
    def N(self):
        """
        :returns: the total number of sites
        """
        if hasattr(self, 'sitecol'):
            return len(self.sitecol.complete) if self.sitecol else None
        return len(self.datastore['sitecol'])

    @property
    def few_sites(self):
        """
        :returns: True if there are less than max_sites_disagg
        """
        return len(self.sitecol.complete) <= self.oqparam.max_sites_disagg

    def check_overflow(self):
        """Overridden in event based"""

    def check_floating_spinning(self):
        """
        Log the rupture floating/spinning factors and suggest the
        pointsource_distance approximation when they are large.
        """
        f, s = self.csm.get_floating_spinning_factors()
        if f != 1:
            logging.info('Rupture floating factor = %s', f)
        if s != 1:
            logging.info('Rupture spinning factor = %s', s)
        if (f * s >= 1.5 and self.oqparam.pointsource_distance is None
                and 'classical' in self.oqparam.calculation_mode):
            logging.info(
                'You are not using the pointsource_distance approximation:\n'
                'https://docs.openquake.org/oq-engine/advanced/common-mistakes.html#pointsource-distance')

    def read_inputs(self):
        """
        Read risk data and sources if any
        """
        oq = self.oqparam
        self._read_risk_data()
        self.check_overflow()  # check if self.sitecol is too large
        if ('amplification' in oq.inputs and
                oq.amplification_method == 'kernel'):
            logging.info('Reading %s', oq.inputs['amplification'])
            df = AmplFunction.read_df(oq.inputs['amplification'])
            check_amplification(df, self.sitecol)
            self.af = AmplFunction.from_dframe(df)
        if (oq.calculation_mode == 'disaggregation' and
                oq.max_sites_disagg < len(self.sitecol)):
            raise ValueError(
                'Please set max_sites_disagg=%d in %s' % (
                    len(self.sitecol), oq.inputs['job_ini']))
        if ('source_model_logic_tree' in oq.inputs and
                oq.hazard_calculation_id is None):
            with self.monitor('composite source model', measuremem=True):
                self.csm = csm = readinput.get_composite_source_model(
                    oq, self.datastore.hdf5)
                mags_by_trt = csm.get_mags_by_trt()
                # interpolate the maximum distance on the magnitudes
                oq.maximum_distance.interp(mags_by_trt)
                for trt in mags_by_trt:
                    self.datastore['source_mags/' + trt] = numpy.array(
                        mags_by_trt[trt])
                self.full_lt = csm.full_lt
        self.init()  # do this at the end of pre-execute
        self.pre_checks()
        if (not oq.hazard_calculation_id
                and oq.calculation_mode != 'preclassical'
                and not oq.save_disk_space):
            self.gzip_inputs()
        # check DEFINED_FOR_REFERENCE_VELOCITY
        if self.amplifier:
            gsim_lt = readinput.get_gsim_lt(oq)
            self.amplifier.check(self.sitecol.vs30, oq.vs30_tolerance,
                                 gsim_lt.values)

    def import_perils(self):
        """Defined in MultiRiskCalculator"""

    def pre_execute(self):
        """
        Check if there is a previous calculation ID.
        If yes, read the inputs by retrieving the previous calculation;
        if not, read the inputs directly.
        """
        oq = self.oqparam
        if 'gmfs' in oq.inputs or 'multi_peril' in oq.inputs:
            # read hazard from files
            assert not oq.hazard_calculation_id, (
                'You cannot use --hc together with gmfs_file')
            with self.monitor('importing inputs', measuremem=True):
                self.read_inputs()
            if 'gmfs' in oq.inputs:
                # the GMFs come from an external file, so the logic tree
                # is trivial (a single fake realization)
                self.datastore['full_lt'] = logictree.FullLogicTree.fake()
                if oq.inputs['gmfs'].endswith('.csv'):
                    eids = import_gmfs_csv(self.datastore, oq,
                                           self.sitecol.complete.sids)
                elif oq.inputs['gmfs'].endswith('.hdf5'):
                    eids = import_gmfs_hdf5(self.datastore, oq)
                else:
                    raise NotImplementedError(
                        'Importer for %s' % oq.inputs['gmfs'])
                E = len(eids)
                if hasattr(oq, 'number_of_ground_motion_fields'):
                    if oq.number_of_ground_motion_fields != E:
                        raise RuntimeError(
                            'Expected %d ground motion fields, found %d' %
                            (oq.number_of_ground_motion_fields, E))
                else:  # set the number of GMFs from the file
                    oq.number_of_ground_motion_fields = E
            else:
                self.import_perils()
            self.save_crmodel()
        elif 'hazard_curves' in oq.inputs:  # read hazard from file
            assert not oq.hazard_calculation_id, (
                'You cannot use --hc together with hazard_curves')
            haz_sitecol = readinput.get_site_collection(oq)
            self.load_crmodel()  # must be after get_site_collection
            self.read_exposure(haz_sitecol)  # define .assets_by_site
            poes = fix_ones(readinput.pmap).array(len(haz_sitecol))
            self.datastore['_poes'] = poes.transpose(2, 0, 1)  # shape GNL
            self.datastore['assetcol'] = self.assetcol
            self.datastore['full_lt'] = fake = logictree.FullLogicTree.fake()
            self.datastore['rlzs_by_g'] = sum(
                fake.get_rlzs_by_grp().values(), [])
            with hdf5.File(self.datastore.tempname, 'a') as t:
                t['oqparam'] = oq
            self.realizations = fake.get_realizations()
            self.save_crmodel()
            self.datastore.swmr_on()
        elif oq.hazard_calculation_id:
            # --hc case: reuse the datastore of a previous calculation
            parent = datastore.read(oq.hazard_calculation_id)
            self.check_precalc(parent['oqparam'].calculation_mode)
            self.datastore.parent = parent
            # copy missing parameters from the parent
            if 'concurrent_tasks' not in vars(self.oqparam):
                self.oqparam.concurrent_tasks = (
                    self.oqparam.__class__.concurrent_tasks.default)
            params = {name: value for name, value in
                      vars(parent['oqparam']).items()
                      if name not in vars(self.oqparam)}
            self.save_params(**params)
            with self.monitor('importing inputs', measuremem=True):
                self.read_inputs()
            oqp = parent['oqparam']
            if oqp.investigation_time != oq.investigation_time:
                raise ValueError(
                    'The parent calculation was using investigation_time=%s'
                    ' != %s' % (oqp.investigation_time, oq.investigation_time))
            hstats, rstats = list(oqp.hazard_stats()), list(oq.hazard_stats())
            if hstats != rstats:
                raise ValueError(
                    'The parent calculation had stats %s != %s' %
                    (hstats, rstats))
            missing_imts = set(oq.risk_imtls) - set(oqp.imtls)
            if missing_imts:
                raise ValueError(
                    'The parent calculation is missing the IMT(s) %s' %
                    ', '.join(missing_imts))
            self.save_crmodel()
        elif self.__class__.precalc:
            # run the declared precalculator and inherit its attributes
            calc = calculators[self.__class__.precalc](
                self.oqparam, self.datastore.calc_id)
            calc.from_engine = self.from_engine
            calc.pre_checks = lambda: self.__class__.pre_checks(calc)
            calc.run(remove=False)
            calc.datastore.close()
            for name in ('csm param sitecol assetcol crmodel realizations '
                         'policy_name policy_dict full_lt exported').split():
                if hasattr(calc, name):
                    setattr(self, name, getattr(calc, name))
        else:
            with self.monitor('importing inputs', measuremem=True):
                self.read_inputs()
            self.save_crmodel()

    def init(self):
        """
        To be overridden to initialize the datasets needed by the calculation
        """
        oq = self.oqparam
        if not oq.risk_imtls:
            if self.datastore.parent:
                oq.risk_imtls = (
                    self.datastore.parent['oqparam'].risk_imtls)
        if 'full_lt' in self.datastore:
            full_lt = self.datastore['full_lt']
            self.realizations = full_lt.get_realizations()
            if oq.hazard_calculation_id and 'gsim_logic_tree' in oq.inputs:
                # redefine the realizations by reading the weights from the
                # gsim_logic_tree_file that could be different from the parent
                full_lt.gsim_lt = logictree.GsimLogicTree(
                    oq.inputs['gsim_logic_tree'], set(full_lt.trts))
        elif hasattr(self, 'csm'):
            self.check_floating_spinning()
            self.realizations = self.csm.full_lt.get_realizations()
        else:  # build a fake; used by risk-from-file calculators
            self.datastore['full_lt'] = fake = logictree.FullLogicTree.fake()
            self.realizations = fake.get_realizations()

    @general.cached_property
    def R(self):
        """
        :returns: the number of realizations
        """
        if self.oqparam.collect_rlzs:
            return 1
        elif 'weights' in self.datastore:
            return len(self.datastore['weights'])
        try:
            return self.csm.full_lt.get_num_rlzs()
        except AttributeError:  # no self.csm
            return self.datastore['full_lt'].get_num_rlzs()

    def read_exposure(self, haz_sitecol):  # after load_risk_model
        """
        Read the exposure, the risk models and update the attributes
        .sitecol, .assetcol

        :param haz_sitecol: the hazard site collection used to associate
            the assets to the sites
        :returns: the exposure read by readinput
        """
        oq = self.oqparam
        with self.monitor('reading exposure'):
            self.sitecol, self.assetcol, discarded = (
                readinput.get_sitecol_assetcol(
                    oq, haz_sitecol, self.crmodel.loss_types))
            self.datastore['sitecol'] = self.sitecol
            if len(discarded):
                self.datastore['discarded'] = discarded
                if 'scenario' in oq.calculation_mode:
                    # this is normal for the case of scenario from rupture
                    logging.info('%d assets were discarded because too far '
                                 'from the rupture; use `oq show discarded` '
                                 'to show them and `oq plot_assets` to plot '
                                 'them' % len(discarded))
                elif not oq.discard_assets:  # raise an error
                    self.datastore['assetcol'] = self.assetcol
                    raise RuntimeError(
                        '%d assets were discarded; use `oq show discarded` to'
                        ' show them and `oq plot_assets` to plot them' %
                        len(discarded))
        if oq.inputs.get('insurance'):
            k, v = zip(*oq.inputs['insurance'].items())
            self.load_insurance_data(k, v)
        return readinput.exposure

    def load_insurance_data(self, ins_types, ins_files):
        """
        Read the insurance files and populate the policy_dict

        :param ins_types: loss types with insurance
        :param ins_files: corresponding insurance CSV files
        """
        for loss_type, fname in zip(ins_types, ins_files):
            array = hdf5.read_csv(
                fname, {'insurance_limit': float, 'deductible': float,
                        None: object}).array
            policy_name = array.dtype.names[0]
            policy_idx = getattr(self.assetcol.tagcol, policy_name + '_idx')
            # build an array (deductible, insurance_limit) per policy
            insurance = numpy.zeros((len(policy_idx), 2))
            for pol, ded, lim in array[
                    [policy_name, 'deductible', 'insurance_limit']]:
                insurance[policy_idx[pol]] = ded, lim
            self.policy_dict[loss_type] = insurance
            if self.policy_name and policy_name != self.policy_name:
                raise ValueError(
                    'The file %s contains %s as policy field, but we were '
                    'expecting %s' % (fname, policy_name, self.policy_name))
            else:
                self.policy_name = policy_name

    def load_crmodel(self):
        # to be called before read_exposure
        # NB: this is called even if there is no risk model
        """
        Read the risk models and set the attribute .crmodel.
        The crmodel can be empty for hazard calculations.
        Save the loss ratios (if any) in the datastore.
        """
        oq = self.oqparam
        logging.info('Reading the risk model if present')
        self.crmodel = readinput.get_crmodel(oq)
        if not self.crmodel:
            # fall back on the parent's risk model, if any
            parent = self.datastore.parent
            if 'crm' in parent:
                self.crmodel = riskmodels.CompositeRiskModel.read(parent, oq)
            return
        if oq.ground_motion_fields and not oq.imtls:
            raise InvalidFile('No intensity_measure_types specified in %s' %
                              self.oqparam.inputs['job_ini'])
        self.save_params()  # re-save oqparam

    def save_crmodel(self):
        """
        Save the risk models in the datastore
        """
        if len(self.crmodel):
            logging.info('Storing risk model')
            attrs = self.crmodel.get_attrs()
            self.datastore.create_df('crm', self.crmodel.to_dframe(),
                                     'gzip', **attrs)

    def _read_risk_data(self):
        # read the risk model (if any), the exposure (if any) and then the
        # site collection, possibly extracted from the exposure.
        oq = self.oqparam
        self.load_crmodel()  # must be called first
        if (not oq.imtls and 'shakemap' not in oq.inputs
                and oq.ground_motion_fields):
            raise InvalidFile('There are no intensity measure types in %s' %
                              oq.inputs['job_ini'])
        if oq.hazard_calculation_id:
            # take the site collection from the parent calculation
            with datastore.read(oq.hazard_calculation_id) as dstore:
                haz_sitecol = dstore['sitecol'].complete
                if ('amplification' in oq.inputs and
                        'ampcode' not in haz_sitecol.array.dtype.names):
                    haz_sitecol.add_col('ampcode', site.ampcode_dt)
        else:
            if 'gmfs' in oq.inputs and oq.inputs['gmfs'].endswith('.hdf5'):
                with hdf5.File(oq.inputs['gmfs']) as f:
                    haz_sitecol = f['sitecol']
            else:
                haz_sitecol = readinput.get_site_collection(oq, self.datastore)
            if hasattr(self, 'rup'):
                # for scenario we reduce the site collection to the sites
                # within the maximum distance from the rupture
                haz_sitecol, _dctx = self.cmaker.filter(
                    haz_sitecol, self.rup)
                haz_sitecol.make_complete()
            if 'site_model' in oq.inputs:
                self.datastore['site_model'] = readinput.get_site_model(oq)
        oq_hazard = (self.datastore.parent['oqparam']
                     if self.datastore.parent else None)
        self.policy_name = ''
        self.policy_dict = {}
        if 'exposure' in oq.inputs:
            exposure = self.read_exposure(haz_sitecol)
            self.datastore['assetcol'] = self.assetcol
            self.datastore['cost_calculator'] = exposure.cost_calculator
            if hasattr(readinput.exposure, 'exposures'):
                self.datastore['assetcol/exposures'] = (
                    numpy.array(exposure.exposures, hdf5.vstr))
        elif 'assetcol' in self.datastore.parent:
            # reuse the parent exposure, possibly reduced
            assetcol = self.datastore.parent['assetcol']
            if oq.region:
                region = wkt.loads(oq.region)
                self.sitecol = haz_sitecol.within(region)
            if oq.shakemap_id or 'shakemap' in oq.inputs or oq.shakemap_uri:
                self.sitecol, self.assetcol = read_shakemap(
                    self, haz_sitecol, assetcol)
                self.datastore['sitecol'] = self.sitecol
                self.datastore['assetcol'] = self.assetcol
            elif hasattr(self, 'sitecol') and general.not_equal(
                    self.sitecol.sids, haz_sitecol.sids):
                self.assetcol = assetcol.reduce(self.sitecol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets',
                             len(self.assetcol), len(assetcol))
            else:
                self.assetcol = assetcol
        else:  # no exposure
            self.sitecol = haz_sitecol
            if self.sitecol and oq.imtls:
                logging.info('Read N=%d hazard sites and L=%d hazard levels',
                             len(self.sitecol), oq.imtls.size)
        if oq_hazard:
            parent = self.datastore.parent
            if 'assetcol' in parent:
                check_time_event(oq, parent['assetcol'].occupancy_periods)
            elif oq.job_type == 'risk' and 'exposure' not in oq.inputs:
                raise ValueError('Missing exposure both in hazard and risk!')
            if oq_hazard.time_event and oq_hazard.time_event != oq.time_event:
                raise ValueError(
                    'The risk configuration file has time_event=%s but the '
                    'hazard was computed with time_event=%s' % (
                        oq.time_event, oq_hazard.time_event))
        if oq.job_type == 'risk':
            tmap = readinput.taxonomy_mapping(
                self.oqparam, self.assetcol.tagcol.taxonomy)
            self.crmodel.tmap = tmap
            taxonomies = set()
            for ln in oq.loss_names:
                for items in self.crmodel.tmap[ln]:
                    for taxo, weight in items:
                        if taxo != '?':
                            taxonomies.add(taxo)
            # check that we are covering all the taxonomies in the exposure
            missing = taxonomies - set(self.crmodel.taxonomies)
            if self.crmodel and missing:
                raise RuntimeError('The exposure contains the taxonomies %s '
                                   'which are not in the risk model' % missing)
            if len(self.crmodel.taxonomies) > len(taxonomies):
                logging.info('Reducing risk model from %d to %d taxonomies',
                             len(self.crmodel.taxonomies), len(taxonomies))
                self.crmodel = self.crmodel.reduce(taxonomies)
                self.crmodel.tmap = tmap
            self.crmodel.reduce_cons_model(self.assetcol.tagcol)
        if hasattr(self, 'sitecol') and self.sitecol:
            if 'site_model' in oq.inputs:
                assoc_dist = (oq.region_grid_spacing * 1.414
                              if oq.region_grid_spacing else 5)  # Graeme's 5km
                sm = readinput.get_site_model(oq)
                self.sitecol.complete.assoc(sm, assoc_dist)
                self.datastore['sitecol'] = self.sitecol
        # store amplification functions if any
        self.af = None
        if 'amplification' in oq.inputs:
            logging.info('Reading %s', oq.inputs['amplification'])
            df = AmplFunction.read_df(oq.inputs['amplification'])
            check_amplification(df, self.sitecol)
            if oq.amplification_method == 'kernel':
                # TODO: need to add additional checks on the main calculation
                # methodology since the kernel method is currently tested only
                # for classical PSHA
                self.af = AmplFunction.from_dframe(df)
                self.amplifier = None
            else:
                self.amplifier = Amplifier(oq.imtls, df, oq.soil_intensities)
        else:
            self.amplifier = None
        # manage secondary perils
        sec_perils = oq.get_sec_perils()
        for sp in sec_perils:
            sp.prepare(self.sitecol)  # add columns as needed
        mal = {lt: getdefault(oq.minimum_asset_loss, lt)
               for lt in oq.loss_names}
        if mal:
            logging.info('minimum_asset_loss=%s', mal)
        # parameters passed to the workers
        self.param = dict(individual_curves=oq.individual_curves,
                          ps_grid_spacing=oq.ps_grid_spacing,
                          minimum_distance=oq.minimum_distance,
                          collapse_level=int(oq.collapse_level),
                          split_sources=oq.split_sources,
                          avg_losses=oq.avg_losses,
                          amplifier=self.amplifier,
                          sec_perils=sec_perils,
                          ses_seed=oq.ses_seed)
        # compute exposure stats
        if hasattr(self, 'assetcol'):
            save_agg_values(
                self.datastore, self.assetcol, oq.loss_names, oq.aggregate_by)

    def store_rlz_info(self, rel_ruptures):
        """
        Save info about the composite source model inside the full_lt dataset

        :param rel_ruptures: dictionary TRT -> number of relevant ruptures
        """
        oq = self.oqparam
        if hasattr(self, 'full_lt'):  # no scenario
            self.realizations = self.full_lt.get_realizations()
            if not self.realizations:
                raise RuntimeError('Empty logic tree: too much filtering?')
            self.datastore['full_lt'] = self.full_lt
        else:  # scenario
            self.full_lt = self.datastore['full_lt']
        R = self.R
        logging.info('There are %d realization(s)', R)
        self.datastore['weights'] = arr = build_weights(self.realizations)
        self.datastore.set_attrs('weights', nbytes=arr.nbytes)
        if ('event_based' in oq.calculation_mode and R >= TWO16
                or R >= TWO32):
            # rlz_ids are 16 bit in event based, 32 bit elsewhere
            raise ValueError(
                'The logic tree has too many realizations (%d), use sampling '
                'instead' % R)
        elif R > 10000:
            logging.warning(
                'The logic tree has %d realizations(!), please consider '
                'sampling it', R)
        if rel_ruptures:
            self.check_discardable(rel_ruptures)

    def check_discardable(self, rel_ruptures):
        """
        Check if logic tree reduction is possible

        :param rel_ruptures: dictionary grp_id -> number of relevant ruptures
        """
        n = len(self.full_lt.sm_rlzs)
        keep_trts = set()
        nrups = []
        for grp_id, trt_smrs in enumerate(self.datastore['trt_smrs']):
            trti, smrs = numpy.divmod(trt_smrs, n)
            trt = self.full_lt.trts[trti[0]]
            nr = rel_ruptures.get(grp_id, 0)
            nrups.append(nr)
            if nr:
                keep_trts.add(trt)
        self.datastore['est_rups_by_grp'] = U32(nrups)
        discard_trts = set(self.full_lt.trts) - keep_trts
        if discard_trts:
            msg = ('No sources for some TRTs: you should set\n'
                   'discard_trts = %s\nin %s') % (
                ', '.join(discard_trts), self.oqparam.inputs['job_ini'])
            logging.warning(msg)

    def store_source_info(self, calc_times, nsites=False):
        """
        Save (eff_ruptures, num_sites, calc_time) inside the source_info

        :param calc_times: dictionary of performance information per source
        :param nsites: if True, also store the number of sites
        :returns: the list of source ids
        """
        self.csm.update_source_info(calc_times, nsites)
        recs = [tuple(row) for row in self.csm.source_info.values()]
        hdf5.extend(self.datastore['source_info'],
                    numpy.array(recs, readinput.source_info_dt))
        return [rec[0] for rec in recs]  # return source_ids

    def post_process(self):
        """For compatibility with the engine"""
class RiskCalculator(HazardCalculator):
    """
    Base class for all risk calculators. A risk calculator must set the
    attributes .crmodel, .sitecol, .assetcol, .riskinputs in the
    pre_execute phase.
    """
    def build_riskinputs(self, kind):
        """
        :param kind:
            kind of hazard getter, can be 'poe' or 'gmf'
        :returns:
            a list of RiskInputs objects, sorted by IMT.
        :raises ValueError:
            if the risk IMTs are disjoint from the hazard IMTs
        """
        logging.info('Building risk inputs from %d realization(s)', self.R)
        imtset = set(self.oqparam.imtls) | set(self.oqparam.get_sec_imts())
        if not set(self.oqparam.risk_imtls) & imtset:
            rsk = ', '.join(self.oqparam.risk_imtls)
            haz = ', '.join(imtset)
            raise ValueError('The IMTs in the risk models (%s) are disjoint '
                             "from the IMTs in the hazard (%s)" % (rsk, haz))
        if not hasattr(self.crmodel, 'tmap'):
            self.crmodel.tmap = readinput.taxonomy_mapping(
                self.oqparam, self.assetcol.tagcol.taxonomy)
        with self.monitor('building riskinputs'):
            if self.oqparam.hazard_calculation_id:
                dstore = self.datastore.parent
            else:
                dstore = self.datastore
            # dispatch on _gen_riskinputs_poe or _gen_riskinputs_gmf
            riskinputs = getattr(self, '_gen_riskinputs_' + kind)(dstore)
        assert riskinputs
        if all(isinstance(ri.hazard_getter, getters.ZeroGetter)
               for ri in riskinputs):
            raise RuntimeError(f'the {kind}s are all zeros on the assets')
        logging.info('Built %d risk inputs', len(riskinputs))
        self.acc = None
        return riskinputs

    def _gen_riskinputs_gmf(self, dstore):
        # build one RiskInput per site with assets, reading the GMFs
        out = []
        if 'gmf_data' not in dstore:  # needed for case_shakemap
            dstore.close()
            dstore = self.datastore
        if 'gmf_data' not in dstore:
            raise InvalidFile('No gmf_data: did you forget gmfs_csv in %s?'
                              % self.oqparam.inputs['job_ini'])
        rlzs = dstore['events']['rlz_id']
        gmf_df = dstore.read_df('gmf_data', 'sid')
        logging.info('Events per site: ~%d', len(gmf_df) / self.N)
        logging.info('Grouping the GMFs by site ID')
        by_sid = dict(list(gmf_df.groupby(gmf_df.index)))
        asset_df = self.assetcol.to_dframe('site_id')
        for sid, assets in asset_df.groupby(asset_df.index):
            try:
                df = by_sid[sid]
            except KeyError:
                # no GMFs on this site: zero hazard
                getter = getters.ZeroGetter(
                    sid, rlzs, self.R, gmf_df.columns[1:])  # strip eid
            else:
                df['rlz'] = rlzs[df.eid.to_numpy()]
                getter = getters.GmfDataGetter(sid, df, len(rlzs), self.R)
                if len(dstore['gmf_data/eid']) == 0:
                    raise RuntimeError(
                        'There are no GMFs available: perhaps you did set '
                        'ground_motion_fields=False or a large minimum_intensity')
            out.append(riskinput.RiskInput(getter, assets))
        return out

    def _gen_riskinputs_poe(self, dstore):
        # build RiskInputs from the hazard curves, one or more per site
        out = []
        asset_df = self.assetcol.to_dframe('site_id')
        for sid, assets in asset_df.groupby(asset_df.index):
            # hcurves, shape (R, N)
            ws = [rlz.weight for rlz in self.realizations]
            getter = getters.PmapGetter(dstore, ws, [sid], self.oqparam.imtls)
            # split large sites in chunks of at most assets_per_site_limit
            for slc in general.split_in_slices(
                    len(assets), self.oqparam.assets_per_site_limit):
                out.append(riskinput.RiskInput(getter, assets[slc]))
                if slc.stop - slc.start >= TWO16:
                    logging.error('There are %d assets on site #%d!',
                                  slc.stop - slc.start, sid)
        return out

    def execute(self):
        """
        Parallelize on the riskinputs and returns a dictionary of results.
        Require a `.core_task` to be defined with signature
        (riskinputs, crmodel, param, monitor).
        """
        if not hasattr(self, 'riskinputs'):  # in the reportwriter
            return
        ct = self.oqparam.concurrent_tasks or 1
        maxw = sum(ri.weight for ri in self.riskinputs) / ct
        self.datastore.swmr_on()
        smap = parallel.Starmap(
            self.core_task.__func__, h5=self.datastore.hdf5)
        smap.monitor.save('crmodel', self.crmodel)
        for block in general.block_splitter(
                self.riskinputs, maxw, get_weight, sort=True):
            for ri in block:
                # we must use eager reading for performance reasons:
                # concurrent reading on the workers would be extra-slow;
                # also, I could not get lazy reading to work with
                # the SWMR mode for event_based_risk
                if not isinstance(ri.hazard_getter, getters.PmapGetter):
                    ri.hazard_getter.init()
            smap.submit((block, self.param))
        return smap.reduce(self.combine, self.acc)

    def combine(self, acc, res):
        """
        Combine the outputs assuming acc and res are dictionaries

        :param acc: the accumulator
        :param res: a partial result from a worker (None on OOM)
        """
        if res is None:
            raise MemoryError('You ran out of memory!')
        return acc + res
def import_gmfs_csv(dstore, oqparam, sids):
    """
    Import in the datastore a ground motion field CSV file.

    :param dstore: the datastore
    :param oqparam: an OqParam instance
    :param sids: the complete site IDs
    :returns: event_ids
    :raises ValueError: on missing IMTs, duplicated (sid, eid) pairs or
        event IDs not starting from zero
    """
    fname = oqparam.inputs['gmfs']
    array = hdf5.read_csv(fname, {'sid': U32, 'eid': U32, None: F32},
                          renamedict=dict(site_id='sid', event_id='eid',
                                          rlz_id='rlzi')).array
    names = array.dtype.names  # rlz_id, sid, ...
    if names[0] == 'rlzi':  # backward compatibility
        names = names[1:]  # discard the field rlzi
    # strip the 'gmv_' prefix (4 characters) from the column names to get
    # the IMTs; NB: str.lstrip('gmv_') would be wrong, since it removes a
    # *set of characters* and would corrupt IMTs starting with g, m, v or _
    imts = [name[4:] for name in names[2:]]
    oqparam.hazard_imtls = {imt: [0] for imt in imts}
    missing = set(oqparam.imtls) - set(imts)
    if missing:
        raise ValueError('The calculation needs %s which is missing from %s' %
                         (', '.join(missing), fname))
    imt2idx = {imt: i for i, imt in enumerate(oqparam.imtls)}
    arr = numpy.zeros(len(array), oqparam.gmf_data_dt())
    for name in names:
        if name.startswith('gmv_'):
            try:
                m = imt2idx[name[4:]]
            except KeyError:  # the file contains more than enough IMTs
                pass
            else:
                arr[f'gmv_{m}'][:] = array[name]
        else:
            arr[name] = array[name]
    n = len(numpy.unique(array[['sid', 'eid']]))
    if n != len(array):
        raise ValueError('Duplicated site_id, event_id in %s' % fname)
    # store the events
    eids = numpy.unique(array['eid'])
    eids.sort()
    if eids[0] != 0:
        raise ValueError('The event_id must start from zero in %s' % fname)
    E = len(eids)
    events = numpy.zeros(E, rupture.events_dt)
    events['id'] = eids
    logging.info('Storing %d events, all relevant', E)
    dstore['events'] = events
    # store the GMFs ordered by site ID, then by event ID
    dic = general.group_array(arr, 'sid')
    gmvlst = [dic[sid] for sid in sids if sid in dic]
    data = numpy.concatenate(gmvlst)
    data.sort(order='eid')
    create_gmf_data(dstore, oqparam.get_primary_imtls(),
                    oqparam.get_sec_imts(), data=data)
    dstore['weights'] = numpy.ones(1)
    return eids
def _getset_attrs(oq):
    """
    Read effective_time, num_events and imts from oq.inputs['gmfs'].
    If the format of the file is old (engine 3.11) also set the attributes
    investigation_time and ses_per_logic_tree_path on `oq`.

    :param oq: an OqParam instance with a 'gmfs' input
    :returns: a dictionary with effective_time, num_events and imts
    """
    with hdf5.File(oq.inputs['gmfs'], 'r') as f:
        attrs = f['gmf_data'].attrs
        etime = attrs.get('effective_time')
        num_events = attrs.get('num_events')
        if etime is None:   # engine == 3.11
            # the metadata must be reconstructed from the stored oqparam
            R = len(f['weights'])
            num_events = len(f['events'])
            arr = f.getitem('oqparam')
            it = arr['par_name'] == b'investigation_time'
            it = float(arr[it]['par_value'][0])
            oq.investigation_time = it
            ses = arr['par_name'] == b'ses_per_logic_tree_path'
            ses = int(arr[ses]['par_value'][0])
            oq.ses_per_logic_tree_path = ses
            etime = it * ses * R
            imts = []
            for name in arr['par_name']:
                # parameter names like b'hazard_imtls.PGA' (prefix is
                # 13 bytes long)
                if name.startswith(b'hazard_imtls.'):
                    imts.append(name[13:].decode('utf8'))
        else:  # engine >= 3.12
            imts = attrs['imts'].split()
    return dict(effective_time=etime, num_events=num_events, imts=imts)
def import_gmfs_hdf5(dstore, oqparam):
    """
    Import in the datastore a ground motion field HDF5 file.

    :param dstore: the datastore
    :param oqparam: an OqParam instance
    :returns: event_ids
    """
    # link the gmf_data group of the external file inside the datastore,
    # without copying the data
    dstore['gmf_data'] = h5py.ExternalLink(oqparam.inputs['gmfs'], "gmf_data")
    attrs = _getset_attrs(oqparam)
    oqparam.hazard_imtls = {imt: [0] for imt in attrs['imts']}
    # store the events
    E = attrs['num_events']
    events = numpy.zeros(E, rupture.events_dt)
    events['id'] = numpy.arange(E)
    rel = numpy.unique(dstore['gmf_data/eid'])
    logging.info('Storing %d events, %d relevant', E, len(rel))
    dstore['events'] = events
    dstore['weights'] = numpy.ones(1)
    return events['id']
def create_gmf_data(dstore, prim_imts, sec_imts=(), data=None):
    """
    Create and possibly populate the datasets in the gmf_data group

    :param dstore: the datastore
    :param prim_imts: primary intensity measure types
    :param sec_imts: secondary IMTs (if any)
    :param data: if given, an array with fields sid, eid, gmv_*, ...
    """
    oq = dstore['oqparam']
    R = dstore['full_lt'].get_num_rlzs()
    M = len(prim_imts)
    n = 0 if data is None else len(data['sid'])
    items = [('sid', U32 if n == 0 else data['sid']),
             ('eid', U32 if n == 0 else data['eid'])]
    for m in range(M):
        col = f'gmv_{m}'
        items.append((col, F32 if data is None else data[col]))
    for imt in sec_imts:
        items.append((str(imt), F32 if n == 0 else data[imt]))
    if oq.investigation_time:
        eff_time = oq.investigation_time * oq.ses_per_logic_tree_path * R
    else:
        eff_time = 0
    dstore.create_df('gmf_data', items, 'gzip')
    dstore.set_attrs('gmf_data', num_events=len(dstore['events']),
                     imts=' '.join(map(str, prim_imts)),
                     effective_time=eff_time)
    if data is not None:
        # compute the mean and stddev of the GMFs per site
        df = pandas.DataFrame(dict(items))
        # NOTE(review): the site axis is sized with the number of rows n,
        # not the number of sites, so it may be over-allocated when there
        # are multiple events per site — confirm before changing
        avg_gmf = numpy.zeros((2, n, M + len(sec_imts)), F32)
        # NB: the loop variable shadows the outer `df` on purpose
        for sid, df in df.groupby(df.sid):
            df.pop('eid')
            df.pop('sid')
            avg_gmf[:, sid] = stats.avg_std(df.to_numpy())
        dstore['avg_gmf'] = avg_gmf
def save_agg_values(dstore, assetcol, lossnames, aggby):
    """
    Store agg_keys, agg_values.

    :param dstore: the datastore
    :param assetcol: the asset collection
    :param lossnames: the loss names
    :param aggby: the aggregation tag names (possibly empty)
    :returns: the aggkey dictionary key -> tags
    """
    lst = []
    aggkey = assetcol.tagcol.get_aggkey(aggby)
    K = len(aggkey)
    # the last element of agg_number is the grand total
    agg_number = numpy.zeros(K + 1, U32)
    if aggby:
        logging.info('Storing %d aggregation keys', len(aggkey))
        # one U16 index column plus one string column per aggregation tag
        dt = [(name + '_', U16) for name in aggby] + [
            (name, hdf5.vstr) for name in aggby]
        kvs = []
        for key, val in aggkey.items():
            val = tuple(python3compat.decode(val))
            kvs.append(key + val)
            lst.append(' '.join(val))
        dstore['agg_keys'] = numpy.array(kvs, dt)
        # map each asset to its aggregation key index
        if aggby == ['id']:
            kids = assetcol['ordinal']
        elif aggby == ['site_id']:
            kids = assetcol['site_id']
        else:
            key2i = {key: i for i, key in enumerate(aggkey)}
            kids = [key2i[tuple(t)] for t in assetcol[aggby]]
        dstore['assetcol/kids'] = U16(kids)
        agg_number[:K] = general.fast_agg(kids, assetcol['number'], M=K)
    agg_number[K] = assetcol['number'].sum()
    dstore['agg_number'] = agg_number
    lst.append('*total*')
    if assetcol.get_value_fields():
        dstore['agg_values'] = assetcol.get_agg_values(lossnames, aggby)
        dstore.set_shape_descr(
            'agg_values', aggregation=lst, loss_type=lossnames)
    return aggkey if aggby else {}
def read_shakemap(calc, haz_sitecol, assetcol):
    """
    Enabled only if there is a shakemap_id parameter in the job.ini.
    Download, unzip, parse USGS shakemap files and build a corresponding
    set of GMFs which are then filtered with the hazard site collection
    and stored in the datastore.
    """
    oq = calc.oqparam
    E = oq.number_of_ground_motion_fields
    oq.risk_imtls = oq.imtls or calc.datastore.parent['oqparam'].imtls
    logging.info('Getting/reducing shakemap')
    with calc.monitor('getting/reducing shakemap'):
        # for instance for the test case_shakemap the haz_sitecol
        # has sids in range(0, 26) while sitecol.sids is
        # [8, 9, 10, 11, 13, 15, 16, 17, 18];
        # the total assetcol has 26 assets on the total sites
        # and the reduced assetcol has 9 assets on the reduced sites
        if oq.shakemap_id:
            uridict = {'kind': 'usgs_id', 'id': oq.shakemap_id}
        elif 'shakemap' in oq.inputs:
            uridict = {'kind': 'file_npy', 'fname': oq.inputs['shakemap']}
        else:
            uridict = oq.shakemap_uri
        sitecol, shakemap, discarded = get_sitecol_shakemap(
            uridict.pop('kind'), uridict, oq.imtls, haz_sitecol,
            oq.asset_hazard_distance['default'])
        if len(discarded):
            calc.datastore['discarded'] = discarded
        assetcol.reduce_also(sitecol)
        logging.info('Extracted %d assets', len(assetcol))

    # decide how the GMFs are to be computed
    has_correlation = (oq.spatial_correlation != 'no' or
                       oq.cross_correlation != 'no')
    if 'MMI' in oq.imtls:
        if len(oq.imtls) > 1:
            # mixing MMI with other intensities is unsupported
            raise RuntimeError(
                'There are the following intensities in your model: %s '
                'Models mixing MMI and other intensities are not supported. '
                % ', '.join(oq.imtls.keys()))
        if has_correlation:
            logging.warning('Calculations with MMI intensities do not '
                            'support correlation. No correlations '
                            'are applied.')
        spec = {'kind': 'mmi'}
    elif has_correlation:
        # cross correlation and/or spatial correlation after S&H
        spec = {'kind': 'Silva&Horspool',
                'spatialcorr': oq.spatial_correlation,
                'crosscorr': oq.cross_correlation,
                'cholesky_limit': oq.cholesky_limit}
    else:
        # no correlation required, basic calculation is faster
        spec = {'kind': 'basic'}

    logging.info('Building GMFs')
    with calc.monitor('building/saving GMFs'):
        imts, gmfs = to_gmfs(shakemap, spec, oq.site_effects,
                             oq.truncation_level, E, oq.random_seed, oq.imtls)
        N, E, M = gmfs.shape
        events = numpy.zeros(E, rupture.events_dt)
        events['id'] = numpy.arange(E, dtype=U32)
        calc.datastore['events'] = events
        # flatten into rows of dtype gmv_data_dt: (sid, eid, gmv_0, ...)
        rows = [(sitecol.sids[s], eid) + tuple(gmfs[s, eid])
                for eid, event in enumerate(events)
                for s in numpy.arange(N, dtype=U32)]
        oq.hazard_imtls = {str(imt): [0] for imt in imts}
        data = numpy.array(rows, oq.gmf_data_dt())
        create_gmf_data(calc.datastore, imts, data=data)
    return sitecol, assetcol
def create_risk_by_event(calc):
    """
    Create an empty risk_by_event dataframe with keys event_id, agg_id,
    loss_id and fields for damages, losses and consequences
    """
    oq = calc.oqparam
    dstore = calc.datastore
    aggkey = getattr(calc, 'aggkey', {})  # empty if not aggregate_by
    crmodel = calc.crmodel
    # the three identifying columns are common to both layouts
    id_cols = [('event_id', U32), ('agg_id', U32), ('loss_id', U8)]
    if 'risk' in oq.calculation_mode:
        descr = id_cols + [('loss', F32), ('variance', F32)]
        dstore.create_df('risk_by_event', descr, K=len(aggkey),
                         L=len(oq.loss_names))
    else:  # damage + consequences
        dmgs = ' '.join(crmodel.damage_states[1:])
        descr = id_cols + [(dc, F32) for dc in crmodel.get_dmg_csq()]
        dstore.create_df('risk_by_event', descr, K=len(aggkey),
                         L=len(oq.loss_names), limit_states=dmgs)
| agpl-3.0 |
ettm2012/MissionPlanner | Lib/site-packages/numpy/doc/creation.py | 94 | 5411 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard
to convert are libraries like PIL (able to read and write many image formats
such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ASCII files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
| gpl-3.0 |
MBARIMike/stoqs | stoqs/contrib/analysis/classify.py | 3 | 21623 | #!/usr/bin/env python
"""
Script to execute steps in the classification of measurements including:
1. Labeling specific MeasuredParameters
2. Tagging MeasuredParameters based on a model
Mike McCann
MBARI 16 June 2014
"""
import os
import sys
# Insert Django App directory (parent of config) into python path
sys.path.insert(0, os.path.abspath(os.path.join(
    os.path.dirname(__file__), "../../")))
# Default to the local settings module unless the environment says otherwise
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
# django >=1.7
try:
    import django
    django.setup()  # must run before any stoqs.models import
except AttributeError:
    # older django has no setup(); module imports still work there
    pass
import matplotlib as mpl
mpl.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
import numpy as np
import base64
import warnings
import zlib
from datetime import datetime
from django.db.models import Q
from django.db.utils import IntegrityError
from textwrap import wrap
from stoqs.models import (Activity, ResourceType, Resource, Measurement, MeasuredParameter,
                          MeasuredParameterResource, ResourceResource)
from utils.STOQSQManager import LABEL, DESCRIPTION, COMMANDLINE
from contrib.analysis import BiPlot, NoPPDataException
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
import pickle
# Resource name prefixes used to tag MeasuredParameters in the database
LABELED = 'Labeled'
TRAIN = 'Train'
TEST = 'Test'
class Classifier(BiPlot):
    '''
    To hold methods and data to support classification of measurements in a STOQS database.
    See http://scikit-learn.org/stable/auto_examples/plot_classifier_comparison.html
    '''
    # Candidate estimators, keyed by the names accepted by the --classifier option
    classifiers = { 'Nearest_Neighbors': KNeighborsClassifier(3),
                    'Linear_SVM': SVC(kernel="linear", C=0.025),
                    'RBF_SVM': SVC(gamma=2, C=1),
                    'Decision_Tree': DecisionTreeClassifier(max_depth=5),
                    'Random_Forest': RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
                    'AdaBoost': AdaBoostClassifier(),
                    'Naive_Bayes': GaussianNB(),
                    'LDA': LDA(),
                    'QDA': QDA()
                  }

    def getActivity(self, mpx, mpy):
        '''
        Return the Activity object which MeasuredParameters with ids mpx and mpy belong to
        '''
        acts = Activity.objects.using(self.args.database).filter(
                    instantpoint__measurement__measuredparameter__id__in=(mpx, mpy)).distinct()
        if not acts:
            print("acts = %s" % acts)
            raise Exception('Not exactly 1 activity returned with SQL = \n%s' % str(acts.query))
        else:
            # NOTE(review): if more than one Activity matches, only the first
            # is returned - confirm that is the intent
            return acts[0]

    def saveCommand(self):
        '''
        Save the command executed to a Resource and return it for the doXxxx() method to associate it with the resources it creates
        '''
        rt, _ = ResourceType.objects.using(self.args.database).get_or_create(name=LABEL, description='metadata')
        r, _ = Resource.objects.using(self.args.database).get_or_create(name=COMMANDLINE, value=self.commandline, resourcetype=rt)
        return r

    def saveLabelSet(self, clResource, label, x_ids, y_ids, description, typeName, typeDescription):
        '''
        Save the set of labels in MeasuredParameterResource. Accepts 2 input vectors. (TODO: generalize to N input vectors);
        description is used to describe the criteria for assigning this label. The typeName and typeDescription may be used to
        refer to the grouping, and associate via the grouping the other labels made in the heuristic applied.
        '''
        try:
            # Label
            rt, _ = ResourceType.objects.using(self.args.database).get_or_create(name=typeName, description=typeDescription)
            r, _ = Resource.objects.using(self.args.database).get_or_create(name=LABEL, value=label, resourcetype=rt)
            # Label's description
            rdt, _ = ResourceType.objects.using(self.args.database).get_or_create(name=LABEL, description='metadata')
            rd, _ = Resource.objects.using(self.args.database).get_or_create(name=DESCRIPTION, value=description, resourcetype=rdt)
            rr = ResourceResource(fromresource=r, toresource=rd)
            rr.save(using=self.args.database)

            # Associate with commandlineResource
            ResourceResource.objects.using(self.args.database).get_or_create(fromresource=r, toresource=clResource)
        except IntegrityError as e:
            # A duplicate label/description is harmless; report and keep going
            print(str(e))
            print("Ignoring")

        # Associate MeasuredParameters with Resource
        if self.args.verbose:
            print(" Saving %d values of '%s' with type '%s'" % (len(x_ids), label, typeName))
        for x_id, y_id in zip(x_ids, y_ids):
            a = self.getActivity(x_id, y_id)
            mp_x = MeasuredParameter.objects.using(self.args.database).get(pk=x_id)
            mp_y = MeasuredParameter.objects.using(self.args.database).get(pk=y_id)
            MeasuredParameterResource.objects.using(self.args.database).get_or_create(
                activity=a, measuredparameter=mp_x, resource=r)
            MeasuredParameterResource.objects.using(self.args.database).get_or_create(
                activity=a, measuredparameter=mp_y, resource=r)

    def removeLabels(self, labeledGroupName, label=None, description=None, commandline=None): # pragma: no cover
        '''
        Delete labeled MeasuredParameterResources that have ResourceType.name=labeledGroupName (such as 'Labeled Plankton').
        Restrict deletion to the other passed in options, if specified: label is like 'diatom', description is like
        'Using Platform dorado, Parameter {'salinity': ('33.65', '33.70')} from 20130916T124035 to 20130919T233905'
        (commandline is too long to show in this doc string - see examples in usage note). Note: Some metadata
        ResourceTypes will not be removed even though the Resources that use them will be removed.
        '''
        # Remove MeasuredParameter associations with Resource (Labeled data)
        mprs = MeasuredParameterResource.objects.using(self.args.database).filter(resource__resourcetype__name=labeledGroupName
               ).select_related('resource')
        if label:
            mprs = mprs.filter(resource__name=LABEL, resource__value=label)
        if self.args.verbose > 1:
            print(" Removing MeasuredParameterResources with type = '%s' and label = %s" % (labeledGroupName, label))
        rs = []
        for mpr in mprs:
            rs.append(mpr.resource)
            mpr.delete(using=self.args.database)

        # Remove Resource associations with Resource (label metadata), make rs list distinct with set() before iterating on the delete()
        if label and description and commandline:
            # Bug fix: these lookups previously used QDA (the scikit-learn
            # QuadraticDiscriminantAnalysis alias imported at the top of this
            # file) instead of Django's Q objects, which always raised
            # TypeError and silently skipped the metadata removal.
            rrs = ResourceResource.objects.using(self.args.database).filter(
                    (Q(fromresource__name=LABEL) & Q(fromresource__value=label)) &
                    ((Q(toresource__name=DESCRIPTION) & Q(toresource__value=description)) |
                     (Q(toresource__name=COMMANDLINE) & Q(toresource__value=commandline)) ) )
            if self.args.verbose > 1:
                print(" Removing ResourceResources with fromresource__value = '%s' and toresource__value = '%s'" % (label, description))
            for rr in rrs:
                rr.delete(using=self.args.database)
        else:
            if self.args.verbose > 1:
                print(" Removing Resources associated with labeledGroupName = %s'" % labeledGroupName)
            for r in set(rs):
                r.delete(using=self.args.database)

    def createLabels(self, labeledGroupName):
        '''
        Using discriminator, mins, and maxes label MeasuredParameters in the database so that we can do supervised learning
        '''
        sdt = datetime.strptime(self.args.start, '%Y%m%dT%H%M%S')
        edt = datetime.strptime(self.args.end, '%Y%m%dT%H%M%S')

        commandlineResource = self.saveCommand()

        for label, dmin, dmax in zip(self.args.labels, self.args.mins, self.args.maxes):
            # Multiple discriminators are possible...
            pvDict = {self.args.discriminator: (dmin, dmax)}
            if self.args.verbose:
                print("Making label '%s' with discriminator %s" % (label, pvDict))
            try:
                x_ids, y_ids, _, _, _ = self._getPPData(sdt, edt, self.args.platform, self.args.inputs[0],
                                                        self.args.inputs[1], pvDict, returnIDs=True, sampleFlag=False)
            except NoPPDataException as e:
                print(str(e))
                # Bug fix: skip this label - without the continue, x_ids and
                # y_ids below would be unbound, raising NameError
                continue

            if self.args.verbose:
                print(" (%d, %d) MeasuredParameters returned from database %s" % (len(x_ids), len(y_ids), self.args.database))

            description = 'Using Platform %s, Parameter %s from %s to %s' % (self.args.platform, pvDict, self.args.start, self.args.end)
            if self.args.clobber:
                self.removeLabels(labeledGroupName, label, description, commandlineResource.value)

            self.saveLabelSet(commandlineResource, label, x_ids, y_ids, description, labeledGroupName,
                              'Labeled with %s as discriminator' % self.args.discriminator)

    def loadLabeledData(self, labeledGroupName, classes): # pragma: no cover
        '''
        Retrieve from the database the set of Labeled data and return the standard X and y arrays that the scikit-learn package uses
        '''
        if len(classes) > 2:
            raise Exception('Maximum classes length is 2')

        # NOTE(review): seeding with np.array(0) injects one spurious sample
        # with features (0, 0) and target 0 into the returned arrays - confirm
        # this is intended
        f0 = np.array(0)
        f1 = np.array(0)
        y = np.array(0, dtype=int)
        target = 0
        for label in classes:
            mprs = MeasuredParameterResource.objects.using(self.args.database).filter(
                        resource__name=LABEL, resource__resourcetype__name=labeledGroupName,
                        resource__value=label
                   ).values_list('measuredparameter__datavalue', flat=True)
            count = mprs.filter(measuredparameter__parameter__name=self.args.inputs[0]).count()
            if self.args.verbose:
                print('count = {} for label = {}'.format(count, label))
            if count == 0:
                warnings.warn('count = 0 for label = {}'.format(label))
            f0 = np.append(f0, mprs.filter(measuredparameter__parameter__name=self.args.inputs[0]))
            f1 = np.append(f1, mprs.filter(measuredparameter__parameter__name=self.args.inputs[1]))
            y = np.append(y, np.ones(count) * target)
            target += 1

        # Form the feature vectors into the X matrix that sklearn wants
        X = np.concatenate((f0.reshape(-1, 1), f1.reshape(-1, 1)), axis=1)

        return X, y

    def doModelsScore(self, labeledGroupName):
        '''
        Print cross-validated accuracy scores for each of the candidate classifiers
        '''
        X, y = self.loadLabeledData(labeledGroupName, classes=self.args.classes)
        X = StandardScaler().fit_transform(X)
        if X.any() and y.any():
            for name, clf in list(self.classifiers.items()):
                scores = cross_val_score(clf, X, y, cv=5)
                print("%-18s accuracy: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
        else:
            raise Exception('No data returned for labeledGroupName = %s' % labeledGroupName)

    def createClassifier(self, labeledGroupName): # pragma: no cover
        '''
        Query the database for labeled training data, fit a model to it, and save the pickled
        model back to the database. Follow the pattern in the example at
        http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
        and learn about Learning at https://www.youtube.com/watch?v=4ONBVNm3isI (see at time 2:33 and
        following - though the whole tutorial is worth watching).
        '''
        clf = self.classifiers[self.args.classifier]
        X, y = self.loadLabeledData(labeledGroupName, classes=self.args.classes)
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.args.test_size, train_size=self.args.train_size)

        # Bug fix: removed leftover debugging breakpoint (import pdb; pdb.set_trace())
        # TODO: Implement graphical evaluation as in http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        if self.args.verbose:
            print(" score = %f" % score)

        self._saveModel(labeledGroupName, clf)

    def _saveModel(self, labeledGroupName, clf):
        '''
        Pickle, compress and base64-encode the fitted model and save it in the database
        '''
        # Save pickled model to the database and relate it to the LABELED data resource
        if self.args.modelBaseName:
            # NOTE(review): the description is hard-coded to 'SVC(gamma=2, C=1)'
            # even though any of self.classifiers may be saved - consider repr(clf)
            rt, _ = ResourceType.objects.using(self.args.database).get_or_create(name='FittedModel', description='SVC(gamma=2, C=1)')
            labeledResource = Resource.objects.using(self.args.database).filter(resourcetype__name=labeledGroupName)[0]
            # Bug fix: bytes.encode("zip").encode("base64") is a Python 2 idiom
            # that fails under Python 3; compress and encode explicitly instead
            modelValue = base64.b64encode(zlib.compress(pickle.dumps(clf))).decode('ascii')
            modelResource = Resource(name=self.args.modelBaseName, value=modelValue, resourcetype=rt)
            modelResource.save(using=self.args.database)
            rr = ResourceResource(fromresource=labeledResource, toresource=modelResource)
            rr.save(using=self.args.database)
            if self.args.verbose:
                print('Saved fitted model to the database with name = %s' % self.args.modelBaseName)
                print('Retrieve with "clf = pickle.loads(zlib.decompress(base64.b64decode(r.value)))"')

    def getFileName(self, figCount):
        '''
        Construct plot file name
        '''
        fileName = 'cpBiPlot_%02d' % figCount
        if self.args.daytime:
            fileName += '_day'
        if self.args.nighttime:
            fileName += '_night'
        fileName += '.png'
        fileName = os.path.join(self.args.plotDir, self.args.plotPrefix + fileName)
        return fileName

    def saveFigure(self, fig, figCount):
        '''
        Save this page, annotated with the command line that produced it
        '''
        provStr = 'Created with STOQS command ' + '\\\n'.join(wrap(self.commandline, width=160)) + ' on ' + datetime.now().ctime()
        plt.figtext(0.0, 0.0, provStr, size=7, horizontalalignment='left', verticalalignment='bottom')
        plt.tight_layout()
        if self.args.title:
            fig.text(0.5, 0.975, self.args.title, horizontalalignment='center', verticalalignment='top')
        fileName = self.getFileName(figCount)
        if self.args.verbose:
            print(' Saving file', fileName)
        fig.savefig(fileName)

    def process_command_line(self):
        '''
        Parse sys.argv into self.args and record the full command line in self.commandline
        '''
        import argparse
        from argparse import RawTextHelpFormatter

        examples = 'Example machine learning workflow:' + '\n\n'
        examples += "Step 1: Create Labeled features in the database using salinity as a discriminator:\n"
        examples += sys.argv[0] + (" --createLabels --groupName Plankton --database stoqs_september2013_t"
                                   " --platform dorado --start 20130916T124035 --end 20130919T233905"
                                   " --inputs bbp700 fl700_uncorr --discriminator salinity"
                                   " --labels diatom dino1 dino2 sediment --mins 33.33 33.65 33.70 33.75"
                                   " --maxes 33.65 33.70 33.75 33.93 --clobber -v\n\n")
        examples += "Step 2: Evaluate classifiers using the labels created in Step 1\n"
        examples += sys.argv[0] + (" --doModelsScore --groupName Plankton --database stoqs_september2013_t"
                                   " --classes diatom sediment --inputs bbp700 fl700_uncorr\n\n")
        examples += "Step 3: Create a prediction model using the labels created in Step 1\n"
        examples += sys.argv[0] + (" --createClassifier --groupName Plankton --database stoqs_september2013_t"
                                   " --classifier Nearest_Neighbors --classes diatom sediment"
                                   " --modelBaseName Nearest_Neighbors_1\n\n")
        examples += "Step 4: Use a model to classify new measurements\n"
        examples += '\nIf running from cde-package replace ".py" with ".py.cde" in the above list.'

        parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
                                         description='Script to execute steps in the classification of measurements',
                                         epilog=examples)

        parser.add_argument('-p', '--platform', action='store', help='STOQS Platform name for training data access')
        parser.add_argument('-d', '--database', action='store', help='Database alias', default='stoqs_september2013_o', required=True)
        ##parser.add_argument('--minDepth', action='store', help='Minimum depth for data queries', default=None, type=float)
        ##parser.add_argument('--maxDepth', action='store', help='Maximum depth for data queries', default=None, type=float)
        parser.add_argument('--createLabels', action='store_true', help='Label data with --discriminator, --groupName --labels, --mins, and --maxes options')
        parser.add_argument('--removeLabels', action='store_true', help='Remove Labels with --groupName option')
        parser.add_argument('--createClassifier', action='store_true', help='Fit a model to Labeled data with --classifier to labels in --labels and save in database as --modelBaseName')
        parser.add_argument('--doModelsScore', action='store_true', help='Print scores for fits of various models for --groupName')
        parser.add_argument('--inputs', action='store', help='List of STOQS Parameter names to use as features, separated by spaces', nargs='*')
        parser.add_argument('--start', action='store', help='Start time in YYYYMMDDTHHMMSS format', default='19000101T000000')
        parser.add_argument('--end', action='store', help='End time in YYYYMMDDTHHMMSS format', default='22000101T000000')
        parser.add_argument('--discriminator', action='store', help='Parameter name to use to discriminate the data')
        parser.add_argument('--groupName', action='store', help='Name to follow "Labeled" in UI describing the group of --labels for --createLabels option')
        parser.add_argument('--labels', action='store', help='List of labels to create separated by spaces', nargs='*')
        parser.add_argument('--mins', action='store', help='List of labels to create separated by spaces', nargs='*')
        parser.add_argument('--maxes', action='store', help='List of labels to create separated by spaces', nargs='*')
        parser.add_argument('--test_size', action='store', help='Proportion of discriminated sample to save as Test set', default=0.4, type=float)
        parser.add_argument('--train_size', action='store', help='Proportion of discriminated sample to save as Train set', default=0.4, type=float)
        parser.add_argument('--classifier', choices=list(self.classifiers.keys()), help='Specify classifier to use with --createClassifier option')
        parser.add_argument('--modelBaseName', action='store', help='Base name of the model to store in the database')
        parser.add_argument('--classes', action='store', help='Labels to load from the database for --doModelsScore and --createClassifier', nargs='*')
        parser.add_argument('--clobber', action='store_true', help='Remove existing MeasuredParameterResource records before adding new classification')
        parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. Higher number = more output.', const=1, default=0)

        self.args = parser.parse_args()
        self.commandline = ' '.join(sys.argv)

        # Conditional tests
        if self.args.doModelsScore:
            # Bug fix: these checks referenced the module-level variable 'c'
            # instead of self, breaking any use outside the __main__ block
            if not self.args.classes:
                parser.error('--doModelsScore requires --classes')
            if not self.args.inputs:
                parser.error('--doModelsScore requires --inputs')
if __name__ == '__main__':
    c = Classifier()
    c.process_command_line()

    # Dispatch on the action flags; each action operates on the
    # "Labeled <groupName>" resource group.
    # NOTE(review): --createLabels is a standalone if, so it can run in
    # combination with one branch of the chain below - confirm intended.
    if c.args.createLabels:
        c.createLabels(' '.join((LABELED, c.args.groupName)))
    if c.args.removeLabels:
        c.removeLabels(' '.join((LABELED, c.args.groupName)))
    elif c.args.doModelsScore:
        c.doModelsScore(' '.join((LABELED, c.args.groupName)))
    elif c.args.createClassifier:
        c.createClassifier(' '.join((LABELED, c.args.groupName)))
| gpl-3.0 |
imaculate/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28856 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Minimal estimator stub used to exercise the cross-validation machinery.

    Its "prediction" is just the number of rows in the input and its score
    depends only on ``foo_param``, so searches over ``foo_param`` are easy
    to reason about.
    """
    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # A real estimator would learn here; we only sanity-check the lengths.
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        return T.shape[0]

    # All prediction-like methods behave identically for this mock.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Any foo_param above 1 earns a perfect score; everything else fails.
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC whose ``score`` attribute is inaccessible.

    Used to check that hyperparameter search falls back to the ``scoring``
    argument when the estimator exposes no score method.
    """
    @property
    def score(self):
        # Simulate a missing attribute so hasattr(est, 'score') is False.
        raise AttributeError
# Tiny linearly-separable toy problem shared by the tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Check that iterating a grid yields the same items as indexing it."""
    indexed = [grid[i] for i in range(len(grid))]
    assert_equal(list(grid), indexed)
def test_parameter_grid():
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    # A grid over one parameter behaves like a sized iterable of dicts
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)
    assert_grid_iter_equals_getitem(grid1)

    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    # len is the size of the cartesian product of the parameter values
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))

    assert_grid_iter_equals_getitem(grid2)

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])
    assert_grid_iter_equals_getitem(empty)
    # indexing past the single empty dict must fail
    assert_raises(IndexError, lambda: empty[1])

    # a list of grids, one of them empty
    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert_equal(len(has_empty), 4)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    # (verbose output is captured so it doesn't pollute the test log)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    grid_search.fit(X, y)
    sys.stdout = old_stdout
    assert_equal(grid_search.best_estimator_.foo_param, 2)

    # grid_scores_ preserves the order of the candidate parameters
    for i, foo_i in enumerate([1, 2, 3]):
        assert_true(grid_search.grid_scores_[i][0]
                    == {'foo_param': foo_i})
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)

    # Test exception handling on scoring: an invalid scoring string must raise
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
    # test the iid parameter
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    # Use the builtin `bool` dtype: the `np.bool` alias is deprecated
    # (and removed in NumPy 1.24) and was always identical to `bool`.
    mask = np.ones(X.shape[0], dtype=bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    svm = SVC(kernel='linear')
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # for first split, 1/4 of dataset is in test, for second 3/4.
    # take weighted average
    assert_almost_equal(first.mean_validation_score,
                        1 * 1. / 4. + 1. / 3. * 3. / 4.)

    # once with iid=False
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
                               iid=False)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    # scores are the same as above
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # averaged score is just mean of scores
    assert_almost_equal(first.mean_validation_score,
                        np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Fitting a second time trips this assertion -- used to verify that
        # grid search clones estimators rather than refitting them in place.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        # Constant zero predictions; only the output shape matters here.
        return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()"""

    # Setting ``parameter`` to this value makes fit() raise; used to
    # exercise GridSearchCV's error_score handling.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")

    def predict(self, X):
        # Constant zero predictions; only the output shape matters here.
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
paulmueller/bornscat | examples/compare_mie_2d.py | 2 | 1390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" TODO
compare scattered field of Rytov with Mie scattering
"""
from __future__ import division
from __future__ import print_function

import numpy as np
import matplotlib
# Backend must be selected before pylab is imported.
matplotlib.use("wxagg")
from matplotlib import pylab as plt
import os
import sys
import time
import unwrap

# Make the package in the parent directory importable when running the
# example straight from a source checkout.
DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, DIR+"/../")

try:
    import miefield as mie
# NOTE(review): bare except -- also swallows KeyboardInterrupt/SystemExit;
# `except ImportError` would be more precise.
except:
    print("Either run 'pip install miefield' or download it manually "+
          "from https://github.com/RI-imaging/miefield")
    exit(0)

import bornscat as br

# Resolution multiplier applied to grid size and sampling below.
rfac = 4

# Set measurement parameters
# Compute scattered field from cylinder
radius = 5 # wavelengths
nmed = 1.333   # refractive index of the medium
ncyl = 1.334   # refractive index of the cylinder
size = 128*rfac # pixels, odd pixels make the day?
res = 2*rfac #23 # px/wavelengths
lambd = 500
# Number of projections
A = 200

# create refractive index map for Born
n = nmed * np.ones((size,size))
n0 = 1*n       # homogeneous background (copy of the medium map)
rad = radius*res   # cylinder radius in pixels
#x=np.linspace(-size/2,size/2,size, endpoint=False)
x=np.linspace(-size/2,size/2,size, endpoint=False)
xv = x.reshape(-1,1)
yv = x.reshape(1,-1)
# Mark all pixels inside the cylinder cross-section.
n[np.where((xv**2+yv**2 < rad**2))] = ncyl

### 2D plotting born
# Rytov
print("Rytov scattered wave")
rytov_u0 = br.rytov_2d(n0, nmed, res)
rytov_u = br.rytov_2d(n, nmed, res)
# Normalize by the background field to isolate the scattered component.
ro = rytov_u/rytov_u0

plt.imshow(np.angle(ro))
plt.show()

# Drop into an interactive shell for inspection.
import IPython
IPython.embed()
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/mpl_toolkits/axes_grid1/mpl_axes.py | 10 | 5045 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import matplotlib.axes as maxes
from matplotlib.artist import Artist
from matplotlib.axis import XAxis, YAxis
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
    """matplotlib Axes subclass whose ``axis`` attribute is a dict-like
    collection of per-side axis artists (``ax.axis["left"]`` etc.) instead
    of the plain ``Axes.axis`` method.  Calling ``ax.axis(...)`` still
    delegates to the standard method via ``AxisDict.__call__``.
    """

    def toggle_axisline(self, b):
        # Kept only for backward compatibility; emits a warning, does nothing.
        warnings.warn("toggle_axisline is not necessary and deprecated in axes_grid1")

    class AxisDict(dict):
        """dict of SimpleAxisArtist keyed by side name, with tuple/slice
        access returning a chained proxy over several sides at once."""

        def __init__(self, axes):
            # The owning Axes instance, needed to forward __call__.
            self.axes = axes
            super(Axes.AxisDict, self).__init__()

        def __getitem__(self, k):
            if isinstance(k, tuple):
                # Several sides at once: ax.axis["left", "right"]
                r = SimpleChainedObjects(
                    [super(Axes.AxisDict, self).__getitem__(k1) for k1 in k])
                return r
            elif isinstance(k, slice):
                # Only the full slice [:] ("all sides") is supported.
                if k.start is None and k.stop is None and k.step is None:
                    r = SimpleChainedObjects(list(six.itervalues(self)))
                    return r
                else:
                    raise ValueError("Unsupported slice")
            else:
                return dict.__getitem__(self, k)

        def __call__(self, *v, **kwargs):
            # ax.axis(...) falls through to the stock Axes.axis method.
            return maxes.Axes.axis(self.axes, *v, **kwargs)

    def __init__(self, *kl, **kw):
        super(Axes, self).__init__(*kl, **kw)

    def _init_axis_artists(self, axes=None):
        # Build one SimpleAxisArtist per spine/side of this Axes.
        if axes is None:
            axes = self
        self._axislines = self.AxisDict(self)
        self._axislines["bottom"] = SimpleAxisArtist(self.xaxis, 1, self.spines["bottom"])
        self._axislines["top"] = SimpleAxisArtist(self.xaxis, 2, self.spines["top"])
        self._axislines["left"] = SimpleAxisArtist(self.yaxis, 1, self.spines["left"])
        self._axislines["right"] = SimpleAxisArtist(self.yaxis, 2, self.spines["right"])

    def _get_axislines(self):
        return self._axislines
    # Shadows Axes.axis (the method) with the per-side artist dict.
    axis = property(_get_axislines)

    def cla(self):
        # Clear the axes, then rebuild the per-side axis artists.
        super(Axes, self).cla()
        self._init_axis_artists()
class SimpleAxisArtist(Artist):
    """Artist exposing one side of an axis (ticks, tick labels, axis label
    and spine) as a single toggleable object, used by ``Axes.axis[...]``.

    Parameters
    ----------
    axis : XAxis or YAxis
        The matplotlib axis this side belongs to.
    axisnum : int
        1 for the primary side (bottom/left), 2 for the secondary
        side (top/right).
    spine : Spine
        The spine line drawn for this side.
    """

    def __init__(self, axis, axisnum, spine):
        self._axis = axis
        self._axisnum = axisnum
        self.line = spine

        # Derive the side name from the axis type and the axis number.
        if isinstance(axis, XAxis):
            self._axis_direction = ["bottom", "top"][axisnum-1]
        elif isinstance(axis, YAxis):
            self._axis_direction = ["left", "right"][axisnum-1]
        else:
            raise ValueError("axis must be instance of XAxis or YAxis : %s is provided" % (axis,))
        Artist.__init__(self)

    def _get_major_ticks(self):
        # Each major Tick carries "tick1line"/"tick2line" line artists.
        tickline = "tick%dline" % self._axisnum
        return SimpleChainedObjects([getattr(tick, tickline) for tick \
                                     in self._axis.get_major_ticks()])

    def _get_major_ticklabels(self):
        # Each major Tick carries "label1"/"label2" text artists.
        label = "label%d" % self._axisnum
        return SimpleChainedObjects([getattr(tick, label) for tick \
                                     in self._axis.get_major_ticks()])

    def _get_label(self):
        return self._axis.label

    major_ticks = property(_get_major_ticks)
    major_ticklabels = property(_get_major_ticklabels)
    label = property(_get_label)

    def set_visible(self, b):
        # Toggle ticks/labels together with the spine; the underlying Axis
        # object is kept visible so the opposite side keeps working.
        self.toggle(all=b)
        self.line.set_visible(b)
        self._axis.set_visible(True)
        Artist.set_visible(self, b)

    def set_label(self, txt):
        self._axis.set_label_text(txt)

    def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
        # `all` sets a default for the three element groups; the individual
        # keyword arguments override it.  None means "leave unchanged".
        if all:
            _ticks, _ticklabels, _label = True, True, True
        elif all is not None:
            _ticks, _ticklabels, _label = False, False, False
        else:
            _ticks, _ticklabels, _label = None, None, None

        if ticks is not None:
            _ticks = ticks
        if ticklabels is not None:
            _ticklabels = ticklabels
        if label is not None:
            _label = label

        # Per-side tick/label switches understood by Axis.set_tick_params.
        tickOn = "tick%dOn" % self._axisnum
        labelOn = "label%dOn" % self._axisnum
        if _ticks is not None:
            tickparam = {tickOn: _ticks}
            self._axis.set_tick_params(**tickparam)
        if _ticklabels is not None:
            tickparam = {labelOn: _ticklabels}
            self._axis.set_tick_params(**tickparam)

        if _label is not None:
            # Only hide the axis label when it is positioned on this side;
            # showing it also moves it onto this side.
            pos = self._axis.get_label_position()
            if (pos == self._axis_direction) and not _label:
                self._axis.label.set_visible(False)
            elif _label:
                self._axis.label.set_visible(True)
                self._axis.set_label_position(self._axis_direction)
if __name__ == '__main__':
    # Minimal smoke test: build a figure with the custom Axes class and
    # exercise cla(), which rebuilds the per-side axis artists.
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
    fig.add_axes(ax)
    ax.cla()
| bsd-2-clause |
rs2/pandas | pandas/core/flags.py | 1 | 3566 | import weakref
class Flags:
    """
    Flags that apply to pandas objects.

    .. versionadded:: 1.2.0

    Parameters
    ----------
    obj : Series or DataFrame
        The object these flags are associated with.
    allows_duplicate_labels : bool, default True
        Whether to allow duplicate labels in this object. By default,
        duplicate labels are permitted. Setting this to ``False`` will
        cause an :class:`errors.DuplicateLabelError` to be raised when
        `index` (or columns for DataFrame) is not unique, or any
        subsequent operation on introduces duplicates.
        See :ref:`duplicates.disallow` for more.

        .. warning::

           This is an experimental feature. Currently, many methods fail to
           propagate the ``allows_duplicate_labels`` value. In future versions
           it is expected that every method taking or returning one or more
           DataFrame or Series objects will propagate ``allows_duplicate_labels``.

    Notes
    -----
    Attributes can be set in two ways

    >>> df = pd.DataFrame()
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    >>> df.flags.allows_duplicate_labels = False
    >>> df.flags
    <Flags(allows_duplicate_labels=False)>

    >>> df.flags['allows_duplicate_labels'] = True
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    """

    # Names accepted by the dict-style __getitem__/__setitem__ interface.
    _keys = {"allows_duplicate_labels"}

    def __init__(self, obj, *, allows_duplicate_labels):
        self._allows_duplicate_labels = allows_duplicate_labels
        # Hold the owner weakly so Flags never keeps the pandas object alive.
        self._obj = weakref.ref(obj)

    @property
    def allows_duplicate_labels(self) -> bool:
        """
        Whether this object allows duplicate labels.

        Setting ``allows_duplicate_labels=False`` ensures that the
        index (and columns of a DataFrame) are unique. Most methods
        that accept and return a Series or DataFrame will propagate
        the value of ``allows_duplicate_labels``.

        See :ref:`duplicates` for more.

        See Also
        --------
        DataFrame.attrs : Set global metadata on this object.
        DataFrame.set_flags : Set global flags on this object.
        """
        return self._allows_duplicate_labels

    @allows_duplicate_labels.setter
    def allows_duplicate_labels(self, value: bool):
        owner = self._obj()
        if owner is None:
            raise ValueError("This flag's object has been deleted.")

        value = bool(value)
        if not value:
            # Disallowing duplicates requires every axis to be unique now.
            for ax in owner.axes:
                ax._maybe_check_unique()
        self._allows_duplicate_labels = value

    def __getitem__(self, key):
        if key in self._keys:
            return getattr(self, key)
        raise KeyError(key)

    def __setitem__(self, key, value):
        if key in self._keys:
            setattr(self, key, value)
        else:
            raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")

    def __repr__(self):
        return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"

    def __eq__(self, other):
        return (
            isinstance(other, type(self))
            and self.allows_duplicate_labels == other.allows_duplicate_labels
        )
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/genmod/_prediction.py | 27 | 9437 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
    """Results container for GLM mean predictions.

    Holds the predicted mean together with its variance and provides
    z- or t-tests, confidence intervals and a summary DataFrame.
    Similar to ContrastResults after t_test; partially copied and adjusted.
    """

    def __init__(self, predicted_mean, var_pred_mean, var_resid=None,
                 df=None, dist=None, row_labels=None, linpred=None, link=None):
        # TODO: is var_resid used?  drop from arguments?
        self.predicted_mean = predicted_mean
        self.var_pred_mean = var_pred_mean
        self.df = df
        self.var_resid = var_resid
        self.row_labels = row_labels
        # prediction results on the linear-predictor scale; used by the
        # 'endpoint' confidence-interval method
        self.linpred = linpred
        # GLM link instance; its inverse maps the linear predictor to the mean
        self.link = link

        # Reference distribution for tests and intervals: None or 'norm'
        # selects the standard normal, 't' selects Student's t with `df`
        # degrees of freedom, anything else is used as-is (assumed to
        # behave like a frozen scipy distribution).
        if dist is None or dist == 'norm':
            self.dist = stats.norm
            self.dist_args = ()
        elif dist == 't':
            self.dist = stats.t
            self.dist_args = (self.df,)
        else:
            self.dist = dist
            self.dist_args = ()

    @property
    def se_obs(self):
        # Standard error for a new observation: not supported for GLM.
        # NOTE(review): the return below is unreachable dead code,
        # presumably left over from the linear-model version of this class.
        raise NotImplementedError
        return np.sqrt(self.var_pred_mean + self.var_resid)

    @property
    def se_mean(self):
        # Standard error of the predicted mean.
        return np.sqrt(self.var_pred_mean)

    @property
    def tvalues(self):
        # Test statistic for the hypothesis that the predicted mean is zero.
        return self.predicted_mean / self.se_mean

    def t_test(self, value=0, alternative='two-sided'):
        '''z- or t-test for hypothesis that mean is equal to value

        Parameters
        ----------
        value : array_like
            value under the null hypothesis
        alternative : string
            'two-sided', 'larger', 'smaller'

        Returns
        -------
        stat : ndarray
            test statistic
        pvalue : ndarray
            p-value of the hypothesis test, the distribution is given by
            the attribute of the instance, specified in `__init__`. Default
            if not specified is the normal distribution.

        '''
        # from statsmodels.stats.weightstats
        # assumes symmetric distribution
        stat = (self.predicted_mean - value) / self.se_mean

        if alternative in ['two-sided', '2-sided', '2s']:
            pvalue = self.dist.sf(np.abs(stat), *self.dist_args)*2
        elif alternative in ['larger', 'l']:
            pvalue = self.dist.sf(stat, *self.dist_args)
        elif alternative in ['smaller', 's']:
            pvalue = self.dist.cdf(stat, *self.dist_args)
        else:
            raise ValueError('invalid alternative')
        return stat, pvalue

    def conf_int(self, method='endpoint', alpha=0.05, **kwds):
        """
        Returns the confidence interval of the value, `effect` of the constraint.

        This is currently only available for t and z tests.

        Parameters
        ----------
        alpha : float, optional
            The significance level for the confidence interval.
            ie., The default `alpha` = .05 returns a 95% confidence interval.
        kwds : extra keyword arguments
            currently ignored, only for compatibility, consistent signature

        Returns
        -------
        ci : ndarray, (k_constraints, 2)
            The array has the lower and the upper limit of the confidence
            interval in the columns.
        """
        # A link is treated as linear iff its inverse is the identity on a
        # small probe grid; then 'endpoint' and 'delta' methods coincide.
        tmp = np.linspace(0, 1, 6)
        is_linear = (self.link.inverse(tmp) == tmp).all()
        if method == 'endpoint' and not is_linear:
            # transform the CI of the linear predictor through the inverse link
            ci_linear = self.linpred.conf_int(alpha=alpha, obs=False)
            ci = self.link.inverse(ci_linear)
        elif method == 'delta' or is_linear:
            # symmetric interval on the mean scale (delta method)
            se = self.se_mean
            q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
            lower = self.predicted_mean - q * se
            upper = self.predicted_mean + q * se
            ci = np.column_stack((lower, upper))
            # if we want to stack at a new last axis, for lower.ndim > 1
            # np.concatenate((lower[..., None], upper[..., None]), axis=-1)

        return ci

    def summary_frame(self, what='all', alpha=0.05):
        """Return a DataFrame with the predicted mean, its standard error
        and the confidence-interval bounds as columns."""
        # TODO: finish and cleanup
        import pandas as pd
        from statsmodels.compat.collections import OrderedDict

        #ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
        ci_mean = self.conf_int(alpha=alpha)
        to_include = OrderedDict()
        to_include['mean'] = self.predicted_mean
        to_include['mean_se'] = self.se_mean
        to_include['mean_ci_lower'] = ci_mean[:, 0]
        to_include['mean_ci_upper'] = ci_mean[:, 1]

        self.table = to_include
        #OrderedDict doesn't work to preserve sequence
        # pandas dict doesn't handle 2d_array
        #data = np.column_stack(list(to_include.values()))
        #names = ....
        res = pd.DataFrame(to_include, index=self.row_labels,
                           columns=to_include.keys())
        return res
def get_prediction_glm(self, exog=None, transform=True, weights=None,
row_labels=None, linpred=None, link=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
### end
pred_kwds['linear'] = False
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
link_deriv = self.model.family.link.inverse_deriv(linpred.predicted_mean)
var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
def params_transform_univariate(params, cov_params, link=None, transform=None,
                                row_labels=None):
    """
    results for univariate, nonlinear, monotonicaly transformed parameters

    This provides transformed values, standard errors and confidence interval
    for transformations of parameters, for example in calculating rates with
    `exp(params)` in the case of Poisson or other models with exponential
    mean function.
    """
    from statsmodels.genmod.families import links

    # Default transformation: exponential (the inverse of the log link).
    if link is None and transform is None:
        link = links.Log()

    # Preserve pandas labels when a Series was passed in.
    if row_labels is None and hasattr(params, 'index'):
        row_labels = params.index

    params = np.asarray(params)

    transformed = link.inverse(params)
    deriv = link.inverse_deriv(params)
    # Delta method: var(g(b)) ~= g'(b)**2 * var(b).
    var_transformed = deriv ** 2 * np.diag(cov_params)
    # TODO: do we want covariance also, or just var/se

    dist = stats.norm

    # TODO: need ci for linear prediction, method of `lin_pred
    linpred = PredictionResults(params, np.diag(cov_params), dist=dist,
                                row_labels=row_labels, link=links.identity())

    return PredictionResults(transformed, var_transformed, dist=dist,
                             row_labels=row_labels, linpred=linpred,
                             link=link)
| bsd-3-clause |
andyh616/mne-python | examples/inverse/plot_lcmv_beamformer_volume.py | 18 | 3046 | """
===================================================================
Compute LCMV inverse solution on evoked data in volume source space
===================================================================
Compute LCMV inverse solution on an auditory evoked dataset in a volume source
space. It stores the solution in a nifti file for visualisation e.g. with
Freeview.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)

import numpy as np
import matplotlib.pyplot as plt

import mne
from mne.datasets import sample
from mne.io import Raw
from mne.beamformer import lcmv

from nilearn.plotting import plot_stat_map
from nilearn.image import index_img

print(__doc__)

# File locations inside the MNE sample dataset: raw recording, events,
# volumetric forward operator and a precomputed (shrunk) noise covariance.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'

###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5  # event 1, window -200 ms .. +500 ms

# Setup for reading the raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels
events = mne.read_events(event_fname)

# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
                       exclude='bads', selection=left_temporal_channels)

# Read epochs; trials with large gradiometer/magnetometer/EOG amplitudes
# are rejected.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()

forward = mne.read_forward_solution(fname_fwd)

# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.read_cov(fname_cov)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                  method='shrunk')

# Run free orientation (vector) beamformer. Source orientation can be
# restricted by setting pick_ori to 'max-power' (or 'normal' but only when
# using a surface-based source space)
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, pick_ori=None)

# Save result in stc files
stc.save('lcmv-vol')

# Keep only the post-stimulus interval of interest.
stc.crop(0.0, 0.2)

# Save result in a 4D nifti file
img = mne.save_stc_as_volume('lcmv_inverse.nii.gz', stc,
                             forward['src'], mri_resolution=False)

t1_fname = data_path + '/subjects/sample/mri/T1.mgz'

# Plotting with nilearn ######################################################
# Index 61 picks one time sample of the 4D image for display; the same
# index is used for the title timestamp.
plot_stat_map(index_img(img, 61), t1_fname, threshold=0.8,
              title='LCMV (t=%.1f s.)' % stc.times[61])

# plot source time courses with the maximum peak amplitudes
plt.figure()
plt.plot(stc.times, stc.data[np.argsort(np.max(stc.data, axis=1))[-40:]].T)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.show()
| bsd-3-clause |
castelao/CoTeDe | cotede/qctests/tukey53H.py | 1 | 3985 | # -*- coding: utf-8 -*-
"""
Shall I use a decorator??
DATA = [25.32, 25.34, 25.34, 25.31, 24.99, 23.46, 21.85, 17.95, 15.39, 11.08, 6.93, 7.93, 5.71, 3.58, np.nan, 1, 1]
tukey53H(np.array, np.maskedArray, pd.Series, xr.DataArray)
delta = tukey53H(x)
w = np.hamming(l)
sigma = (ma.convolve(x, w, mode="same") / w.sum()).std()
return delta / sigma
"""
import logging
import numpy as np
from numpy import ma
from cotede.qctests import QCCheckVar
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
module_logger = logging.getLogger(__name__)
def tukey53H(x, normalize=False):
    """Spike test Tukey 53H from Goring & Nikora 2002

    Parameters
    ----------
    x : array_like
        Sequence of measurements; masked values are treated as NaN.
    normalize : bool, optional
        If True, scale the result by the sample standard deviation of
        the 5-point running median.

    Return
    ------
    delta :
        An array with the same shape of input x of the difference between x
        and a smoothed x.
    """
    if isinstance(x, ma.MaskedArray):
        # BUG FIX: work on a float copy. The previous in-place fill
        # (x[x.mask] = np.nan) clobbered the caller's array and failed
        # for integer dtypes.
        x = x.astype(np.float64).filled(np.nan)

    if not PANDAS_AVAILABLE:
        return _tukey53H_numpy(x, normalize=normalize)

    if hasattr(x, "to_series"):
        x = x.to_series()
    elif not isinstance(x, pd.Series):
        x = pd.Series(x)

    # 5-point then 3-point running medians, then a Hanning-like smoother.
    u1 = x.reset_index(drop=True).rolling(5, center=True).median()
    u2 = u1.rolling(3, center=True).median()
    u3 = 0.25 * (u2.shift(-1) + 2 * u2 + u2.shift(1))
    # NOTE(review): x keeps its original index while u3 is positionally
    # indexed; a non-default index would misalign here -- confirm intended.
    delta = x - u3

    if not normalize:
        return np.array(delta)

    sigma = u1.dropna().std(ddof=1)
    return np.array(delta / sigma)
def _tukey53H_numpy(x, normalize=False):
"""Equivalent to tukey53H but without using pandas
Note
----
- For larger datasets (>1k) like timeseries the pandas alternative can be
significantly faster.
"""
if isinstance(x, ma.MaskedArray):
x[x.mask] = np.nan
x = x.data
N = len(x)
u1 = np.nan * np.ones(N)
for n in range(N - 4):
u1[n + 2] = np.median(x[n : n + 5])
u2 = np.nan * np.ones(N)
for n in range(N - 2):
u2[n + 1] = np.median(u1[n : n + 3])
u3 = np.nan * np.ones(N)
u3[1:-1] = 0.25 * (u2[:-2] + 2 * u2[1:-1] + u2[2:])
delta = np.nan * np.ones(N)
delta[1:-1] = x[1:-1] - u3[1:-1]
if not normalize:
return delta
idx = ~np.isnan(u1)
if idx.all():
return np.nan * delta
sigma = np.std(u1[idx], ddof=1)
return delta / sigma
def tukey53H_norm(x, l=12):
    """Spike test Tukey53H() normalized by the std of the low pass

    ATTENTION: l option is temporarily deactivated.

    l is the number of observations. The default l=12 is truly not
    a big number, but this test focuses on spikes, therefore, any
    variability longer than 12 is something else.
    """
    # `l` is currently ignored; normalization is delegated to tukey53H.
    return tukey53H(x, normalize=True)
class Tukey53H(QCCheckVar):
    # QC check wrapping the Tukey 53H spike statistic. Data access,
    # varname and cfg handling come from the QCCheckVar base class.
    def set_features(self):
        # Always compute the raw statistic; the normalized variant is
        # added only when the config provides an `l` item.
        self.features = {"tukey53H": tukey53H(self.data[self.varname])}
        if "l" in self.cfg:
            self.features["tukey53H_norm"] = tukey53H_norm(self.data[self.varname], l=self.cfg["l"])
    def test(self):
        """
        I slightly modified the Goring & Nikora 2002. It is
        expected that CTD profiles has a typical depth
        structure, with a range between surface and bottom.
        """
        self.flags = {}
        try:
            threshold = self.cfg["threshold"]
        except KeyError:
            # Legacy configs used "k" for the threshold.
            print("Deprecated cfg format. It should contain a threshold item.")
            threshold = self.cfg["k"]
        # The threshold must be a single finite number.
        assert (
            (np.size(threshold) == 1)
            and (threshold is not None)
            and (np.isfinite(threshold))
        )
        flag = np.zeros(np.shape(self.data[self.varname]), dtype="i1")
        feature = np.absolute(self.features["tukey53H"])
        # Flag spikes above the threshold bad, the rest good.
        flag[feature > threshold] = self.flag_bad
        flag[feature <= threshold] = self.flag_good
        x = np.atleast_1d(self.data[self.varname])
        # 9 presumably marks masked/invalid samples -- confirm against the
        # project's flag convention.
        flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9
        self.flags["tukey53H"] = flag
| bsd-3-clause |
glouppe/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample: two clusters around (-1, -1) and (1, 1)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X)  # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
# test points: one near each cluster, plus one farther out
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]

# also load the iris dataset
# and randomly permute it (fixed seed for reproducibility)
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    # Check classification on a toy dataset: every combination of
    # dense/sparse training and prediction input must yield the same labels.
    for fit_X, predict_T in [(X, T), (X_csr, T_csr), (X_csr, T), (X, T_csr)]:
        clf = NearestCentroid()
        clf.fit(fit_X, y)
        assert_array_equal(clf.predict(predict_T), true_result)

    # Fit and predict with non-CSR sparse matrices
    clf = NearestCentroid()
    clf.fit(X_csr.tocoo(), y)
    assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
    # With metric="precomputed", predict expects a matrix of distances
    # from the samples to the fitted centroids.
    clf = NearestCentroid(metric="precomputed")
    clf.fit(X, y)
    dist_to_centroids = pairwise_distances(T, clf.centroids_)
    assert_array_equal(clf.predict(dist_to_centroids), true_result)
def test_iris():
    # Check consistency on dataset iris.
    for metric in ('euclidean', 'cosine'):
        clf = NearestCentroid(metric=metric)
        clf.fit(iris.data, iris.target)
        score = np.mean(clf.predict(iris.data) == iris.target)
        assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
    # Check consistency on dataset iris, when using shrinkage.
    for metric in ('euclidean', 'cosine'):
        for shrink_threshold in [None, 0.1, 0.5]:
            clf = NearestCentroid(
                metric=metric,
                shrink_threshold=shrink_threshold,
            ).fit(iris.data, iris.target)
            score = np.mean(clf.predict(iris.data) == iris.target)
            assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
    import pickle

    # classification: a pickle round-trip must preserve the estimator.
    obj = NearestCentroid()
    obj.fit(iris.data, iris.target)
    score = obj.score(iris.data, iris.target)
    obj2 = pickle.loads(pickle.dumps(obj))
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(iris.data, iris.target)
    assert_array_equal(score, score2,
                       "Failed to generate same score"
                       " after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
    # Shrinkage must not depend on the label encoding: {0, 1} and
    # {-1, 1} targets should give identical centroids.
    clf = NearestCentroid(shrink_threshold=0.01)
    y_01 = np.asarray(y)
    y_01[y_01 == -1] = 0
    clf.fit(X, y_01)
    centroids_01 = clf.centroids_
    clf.fit(X, y)
    assert_array_equal(centroids_01, clf.centroids_)
def test_predict_translated_data():
    # Test that NearestCentroid gives same results on translated data
    rng = np.random.RandomState(0)
    features = rng.rand(50, 50)
    labels = rng.randint(0, 3, 50)
    noise = rng.rand(50)

    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(features, labels)
    y_init = clf.predict(features)

    shifted = features + noise
    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(shifted, labels)
    y_translate = clf.predict(shifted)
    assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
    # Test the manhattan metric: dense and sparse fits must agree, and
    # the centroids are the per-class medians.
    clf = NearestCentroid(metric='manhattan')
    dense_centroid = clf.fit(X, y).centroids_
    sparse_centroid = clf.fit(X_csr, y).centroids_
    assert_array_equal(sparse_centroid, dense_centroid)
    assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports either Numeric or numarray based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: either Numeric or numarray
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose

# (selector, where-it-came-from); resolved below from argv, then rc,
# then a hard default.
which = None, None
use_maskedarray = None

# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
    if a in ["--Numeric", "--numeric", "--NUMERIC",
             "--Numarray", "--numarray", "--NUMARRAY",
             "--NumPy", "--numpy", "--NUMPY", "--Numpy",
             ]:
        which = a[2:], "command line"

    if a == "--maskedarray":
        use_maskedarray = True
    if a == "--ma":
        use_maskedarray = False
# Don't leak the loop variable into the module namespace (sys.argv may
# be empty, hence the NameError guard).
try: del a
except NameError: pass

if which[0] is None:
    try:  # In theory, rcParams always has *some* value for numerix.
        which = rcParams['numerix'], "rc"
    except KeyError:
        pass

if use_maskedarray is None:
    try:
        use_maskedarray = rcParams['maskedarray']
    except KeyError:
        use_maskedarray = False

# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
    which = "numeric", "defaulted"

# Normalize the selector spelling; keep its provenance for error messages.
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
    raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))

if which[0] == "numarray":
    # numarray backend (long deprecated).
    import warnings
    warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
                  DeprecationWarning, stacklevel=1)

    #from na_imports import *
    from numarray import *
    from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
    from numarray.numeric import nonzero
    from numarray.convolve import cross_correlate, convolve
    import numarray
    version = 'numarray %s'%numarray.__version__
    # IEEE-754 quiet NaN, built by bit pattern.
    nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]

elif which[0] == "numeric":
    # Numeric backend (long deprecated).
    import warnings
    warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
                  DeprecationWarning, stacklevel=1)

    #from nc_imports import *
    from Numeric import *
    from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
    from Matrix import Matrix
    import Numeric
    version = 'Numeric %s'%Numeric.__version__
    # IEEE-754 quiet NaN, built by bit pattern.
    nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]

elif which[0] == "numpy":
    # numpy backend; prefer the oldnumeric compatibility layer when present.
    try:
        import numpy.oldnumeric as numpy
        from numpy.oldnumeric import *
    except ImportError:
        import numpy
        from numpy import *
        print 'except asarray', asarray
    from _sp_imports import nx, infinity, rand, randn, isnan, all, any
    from _sp_imports import UInt8, UInt16, UInt32, Infinity
    try:
        from numpy.oldnumeric.matrix import Matrix
    except ImportError:
        Matrix = matrix
    version = 'numpy %s' % numpy.__version__
    from numpy import nan
else:
    # Unreachable: the selector was validated above.
    raise RuntimeError("invalid numerix selector")

# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
    which[0] == 'numeric'):
    from mlab import amin, amax
    newaxis = NewAxis
    # Shims giving the old backends a numpy-like accessor API.
    def typecode(a):
        return a.typecode()
    def iscontiguous(a):
        return a.iscontiguous()
    def byteswapped(a):
        return a.byteswapped()
    def itemsize(a):
        return a.itemsize()
    def angle(a):
        return arctan2(a.imag, a.real)

else:
    # We've already checked for a valid numerix selector,
    # so assume numpy.
    from mlab import amin, amax
    newaxis = NewAxis
    from numpy import angle
    def typecode(a):
        return a.dtype.char
    def iscontiguous(a):
        return a.flags.contiguous
    def byteswapped(a):
        return a.byteswap()
    def itemsize(a):
        return a.itemsize

verbose.report('numerix %s'%version)

# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
def _import_fail_message(module, version):
    """Prints a message when the array package specific version of an extension
    fails to import correctly.
    """
    # Interpolated into the message template below.
    _dict = { "which" : which[0],
              "module" : module,
              "specific" : version + module
              }
    # NOTE(review): the printed text contains "This is is" -- it is a
    # runtime string, left untouched here.
    print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
# Import the backend-specific submodules into this package's namespace;
# __import__ is used so the lookup honors the selector chosen above.
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)

# Short aliases kept for backwards compatibility.
la = linear_algebra
ra = random_array
| gpl-3.0 |
fspaolo/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)

# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
#         Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state

# Noisy, on-average-increasing target: uniform integer noise around a
# scaled log curve; fixed seed for reproducibility.
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))

###############################################################################
# Fit IsotonicRegression and LinearRegression models

ir = IsotonicRegression()

y_ = ir.fit_transform(x, y)

lr = LinearRegression()
lr.fit(x[:, np.newaxis], y)  # x needs to be 2d for LinearRegression

###############################################################################
# plot result

# Vertical segments joining each observation to its isotonic fit value.
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))

fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_LWPR_10/data/results/plot.py | 18 | 1043 | #!/usr/bin/env python
import glob
import os
import sys

import numpy as np
import matplotlib.pyplot as plt

# Run relative to the script's own directory so the glob patterns below
# find the data files regardless of the caller's cwd.
pathname = os.path.dirname(sys.argv[0])
if pathname:
    os.chdir(pathname)


def _load_all(pattern):
    """Load every whitespace-delimited data file matching `pattern`."""
    return [np.loadtxt(fname) for fname in glob.glob(pattern)]


# tl*/tt*: training/test target trajectories; rtl*/rtt*: reproduced results.
trains = _load_all("tl*")
tests = _load_all("tt*")
trial_results = _load_all("rtl*")
test_results = _load_all("rtt*")


def _plot_all(ax, datasets, **kwargs):
    """Overlay x/y curves (cols 1, 2), offset by 6x the params (cols 7, 8)."""
    for d in datasets:
        ax.plot(d[:, 1] + d[:, 7] * 6, d[:, 2] + d[:, 8] * 6, **kwargs)


fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
_plot_all(ax, trains, color="blue", lw=3, alpha=0.5)
_plot_all(ax, tests, color="red", lw=3, alpha=0.5)
_plot_all(ax, trial_results, color=[0, 0, .5], lw=2)
_plot_all(ax, test_results, color=[.5, 0, 0], lw=2)
plt.show()
| gpl-2.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_toolkits/axes_grid1/inset_locator.py | 8 | 10138 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.offsetbox import AnchoredOffsetbox
#from matplotlib.transforms import IdentityTransform
import matplotlib.transforms as mtrans
from .parasite_axes import HostAxes # subclasses mpl_axes
from matplotlib.transforms import Bbox, TransformedBbox, IdentityTransform
from matplotlib.patches import Patch
from matplotlib.path import Path
from matplotlib.patches import Rectangle
class InsetPosition(object):
    """Axes locator placing an axes at a fixed normalized position
    (left, bottom, width, height) inside a parent axes."""

    def __init__(self, parent, lbwh):
        self.parent = parent
        # position of the inset axes in the normalized coordinate of
        # the parent axes
        self.lbwh = lbwh

    def __call__(self, ax, renderer):
        parent_bbox = self.parent.get_position(original=False)
        to_parent = mtrans.BboxTransformTo(parent_bbox)
        inset_bbox = mtrans.Bbox.from_bounds(*self.lbwh)
        return mtrans.TransformedBbox(inset_bbox, to_parent)
class AnchoredLocatorBase(AnchoredOffsetbox):
    # Base class for axes locators built on AnchoredOffsetbox: subclasses
    # provide get_extent(); __call__ turns the anchored box into a
    # figure-relative bbox.
    def __init__(self, bbox_to_anchor, offsetbox, loc,
                 borderpad=0.5, bbox_transform=None):
        # NOTE(review): `offsetbox` is accepted but not forwarded
        # (child=None) -- presumably kept for signature compatibility.
        super(AnchoredLocatorBase, self).__init__(loc,
                                                  pad=0., child=None,
                                                  borderpad=borderpad,
                                                  bbox_to_anchor=bbox_to_anchor,
                                                  bbox_transform=bbox_transform)

    def draw(self, renderer):
        # A locator only computes positions; it is never drawn.
        raise RuntimeError("No draw method should be called")

    def __call__(self, ax, renderer):
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        self._update_offset_func(renderer, fontsize)
        width, height, xdescent, ydescent = self.get_extent(renderer)
        px, py = self.get_offset(width, height, 0, 0, renderer)
        # Convert the pixel-space box into figure-fraction coordinates.
        bbox_canvas = mtrans.Bbox.from_bounds(px, py, width, height)
        tr = ax.figure.transFigure.inverted()
        bb = mtrans.TransformedBbox(bbox_canvas, tr)
        return bb
from . import axes_size as Size
class AnchoredSizeLocator(AnchoredLocatorBase):
    # Locator that sizes the inset from Size specs: a fraction of the
    # anchor bbox plus an absolute term in inches.
    def __init__(self, bbox_to_anchor, x_size, y_size, loc,
                 borderpad=0.5, bbox_transform=None):

        self.axes = None
        self.x_size = Size.from_any(x_size)
        self.y_size = Size.from_any(y_size)

        super(AnchoredSizeLocator, self).__init__(bbox_to_anchor, None, loc,
                                                  borderpad=borderpad,
                                                  bbox_transform=bbox_transform)

    def get_extent(self, renderer):
        # Return (width, height, xdescent, ydescent) in pixels.
        x, y, w, h = self.get_bbox_to_anchor().bounds

        # pixels per inch at the current renderer resolution
        dpi = renderer.points_to_pixels(72.)

        r, a = self.x_size.get_size(renderer)
        width = w*r + a*dpi
        r, a = self.y_size.get_size(renderer)
        height = h*r + a*dpi
        xd, yd = 0, 0

        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize

        return width+2*pad, height+2*pad, xd+pad, yd+pad

    def __call__(self, ax, renderer):
        # Remember the axes being located, then delegate to the base class.
        self.axes = ax
        return super(AnchoredSizeLocator, self).__call__(ax, renderer)
class AnchoredZoomLocator(AnchoredLocatorBase):
    # Locator that sizes the inset as the parent's view limits scaled by
    # a zoom factor.
    def __init__(self, parent_axes, zoom, loc,
                 borderpad=0.5,
                 bbox_to_anchor=None,
                 bbox_transform=None):
        self.parent_axes = parent_axes
        self.zoom = zoom

        if bbox_to_anchor is None:
            bbox_to_anchor = parent_axes.bbox

        super(AnchoredZoomLocator, self).__init__(bbox_to_anchor, None, loc,
                                                  borderpad=borderpad,
                                                  bbox_transform=bbox_transform)

        self.axes = None

    def get_extent(self, renderer):
        # Pixel extent of the inset's data view as seen through the
        # parent's data transform, scaled by the zoom factor.
        bb = mtrans.TransformedBbox(self.axes.viewLim,
                                    self.parent_axes.transData)

        x, y, w, h = bb.bounds

        xd, yd = 0, 0

        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize

        return w*self.zoom+2*pad, h*self.zoom+2*pad, xd+pad, yd+pad

    def __call__(self, ax, renderer):
        # Remember the axes being located, then delegate to the base class.
        self.axes = ax
        return super(AnchoredZoomLocator, self).__call__(ax, renderer)
class BboxPatch(Patch):
    """A rectangular patch drawn exactly over a bbox, in display
    coordinates (the transform is forced to identity)."""

    def __init__(self, bbox, **kwargs):
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        kwargs["transform"] = IdentityTransform()
        Patch.__init__(self, **kwargs)
        self.bbox = bbox

    def get_path(self):
        x0, y0, x1, y1 = self.bbox.extents
        # Closed rectangle: four corners, back to the start, then a
        # CLOSEPOLY sentinel vertex.
        corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
        verts = corners + [(0, 0)]
        codes = [Path.MOVETO] + [Path.LINETO] * 4 + [Path.CLOSEPOLY]
        return Path(verts, codes)
class BboxConnector(Patch):
    """A patch drawing a straight line between corners of two bboxes."""

    @staticmethod
    def get_bbox_edge_pos(bbox, loc):
        """Return the (x, y) position of corner *loc* of *bbox*.

        *loc* follows the legend convention: 1 = upper right,
        2 = upper left, 3 = lower left, 4 = lower right.
        """
        x0, y0, x1, y1 = bbox.extents
        if loc == 1:
            return x1, y1
        elif loc == 2:
            return x0, y1
        elif loc == 3:
            return x0, y0
        elif loc == 4:
            return x1, y0

    @staticmethod
    def connect_bbox(bbox1, bbox2, loc1, loc2=None):
        """Return a Path from corner *loc1* of *bbox1* to corner *loc2*
        (default: *loc1*) of *bbox2*. Rectangles are converted to
        transformed unit bboxes first."""
        if isinstance(bbox1, Rectangle):
            # BUG FIX: was ``bbox1.get_transfrom()`` (typo), which raised
            # AttributeError whenever a Rectangle was passed.
            transform = bbox1.get_transform()
            bbox1 = Bbox.from_bounds(0, 0, 1, 1)
            bbox1 = TransformedBbox(bbox1, transform)

        if isinstance(bbox2, Rectangle):
            transform = bbox2.get_transform()
            bbox2 = Bbox.from_bounds(0, 0, 1, 1)
            bbox2 = TransformedBbox(bbox2, transform)

        if loc2 is None:
            loc2 = loc1

        x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
        x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)

        verts = [[x1, y1], [x2, y2]]
        codes = [Path.MOVETO, Path.LINETO]

        return Path(verts, codes)

    def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
        """
        Connect *bbox1* and *bbox2* with a line from *loc1* to *loc2*.

        Valid kwargs are:
        %(Patch)s

        .. seealso::

            :class:`Patch`
                For additional kwargs
        """
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        kwargs["transform"] = IdentityTransform()
        Patch.__init__(self, **kwargs)
        self.bbox1 = bbox1
        self.bbox2 = bbox2
        self.loc1 = loc1
        self.loc2 = loc2

    def get_path(self):
        return self.connect_bbox(self.bbox1, self.bbox2,
                                 self.loc1, self.loc2)
class BboxConnectorPatch(BboxConnector):
    # Filled patch bounded by two connector lines:
    # bbox1(loc1a) -> bbox2(loc2a) and back bbox2(loc2b) -> bbox1(loc1b).
    def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
        self.loc1b = loc1b
        self.loc2b = loc2b

    def get_path(self):
        # Concatenate the forward and backward connector paths and close
        # the polygon by repeating the first vertex.
        path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
        path2 = self.connect_bbox(self.bbox2, self.bbox1,
                                  self.loc2b, self.loc1b)
        path_merged = (list(path1.vertices) +
                       list(path2.vertices) +
                       [path1.vertices[0]])
        return Path(path_merged)
def _add_inset_axes(parent_axes, inset_axes):
    # Register the inset in the parent's figure and make it
    # non-navigable, so pan/zoom acts on the parent only.
    parent_axes.figure.add_axes(inset_axes)
    inset_axes.set_navigate(False)
def inset_axes(parent_axes, width, height, loc=1,
               bbox_to_anchor=None, bbox_transform=None,
               axes_class=None,
               axes_kwargs=None,
               **kwargs):
    """Create an inset axes of size *width* x *height*, anchored at *loc*
    of *bbox_to_anchor* (default: the parent axes' bbox).

    Extra ``**kwargs`` are forwarded to AnchoredSizeLocator. Returns the
    new axes, already added to the parent figure with navigation disabled.
    """
    if axes_class is None:
        axes_class = HostAxes
    # An empty kwargs dict and "no extra kwargs" are equivalent for the
    # constructor, which removes the duplicated construction branch.
    if axes_kwargs is None:
        axes_kwargs = {}
    inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
                            **axes_kwargs)

    if bbox_to_anchor is None:
        bbox_to_anchor = parent_axes.bbox
    axes_locator = AnchoredSizeLocator(bbox_to_anchor,
                                       width, height,
                                       loc=loc,
                                       bbox_transform=bbox_transform,
                                       **kwargs)

    inset_axes.set_axes_locator(axes_locator)
    _add_inset_axes(parent_axes, inset_axes)
    return inset_axes
def zoomed_inset_axes(parent_axes, zoom, loc=1,
                      bbox_to_anchor=None, bbox_transform=None,
                      axes_class=None,
                      axes_kwargs=None,
                      **kwargs):
    """Create an inset axes showing a zoomed view of *parent_axes*,
    sized as the inset's data view scaled by *zoom*.

    Extra ``**kwargs`` are forwarded to AnchoredZoomLocator. Returns the
    new axes, already added to the parent figure with navigation disabled.
    """
    if axes_class is None:
        axes_class = HostAxes
    # An empty kwargs dict and "no extra kwargs" are equivalent for the
    # constructor, which removes the duplicated construction branch.
    if axes_kwargs is None:
        axes_kwargs = {}
    inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
                            **axes_kwargs)

    axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
                                       bbox_to_anchor=bbox_to_anchor,
                                       bbox_transform=bbox_transform,
                                       **kwargs)
    inset_axes.set_axes_locator(axes_locator)

    _add_inset_axes(parent_axes, inset_axes)
    return inset_axes
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
    """Draw a box on *parent_axes* around the area shown in *inset_axes*
    and connect it to the inset with two lines at corners *loc1*/*loc2*.

    Returns ``(box_patch, connector1, connector2)``.
    """
    # The inset's data limits, expressed in the parent's data coordinates.
    rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)

    pp = BboxPatch(rect, **kwargs)
    parent_axes.add_patch(pp)

    # Connectors are clipped off so they can reach outside the inset.
    p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
    inset_axes.add_patch(p1)
    p1.set_clip_on(False)
    p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
    inset_axes.add_patch(p2)
    p2.set_clip_on(False)

    return pp, p1, p2
| mit |
alvarofierroclavero/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

from sklearn.linear_model import BayesianRidge, LinearRegression

###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features)  # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise

###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)

ols = LinearRegression()
ols.fit(X, y)

###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))

plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
# Mark where the truly relevant features fall in the histogram.
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
         'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")

plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/covariance/tests/test_covariance.py | 79 | 12193 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """EmpiricalCovariance: fit matches empirical_covariance, error norms
    are zero against itself, Mahalanobis distances are positive, and the
    1-feature / 1-sample / integer / assume_centered edge cases behave."""
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    # An unknown norm name must be rejected explicitly.
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert_greater(np.amin(mahal_dist), 0)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)

    # test with one sample
    # Create X with 1 sample and 5 features
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    """ShrunkCovariance: fitted covariance agrees with the functional
    shrunk_covariance() for explicit, default, and zero shrinkage, plus
    the 1-feature and store_precision=False cases."""
    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)

    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)

    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    """LedoitWolf estimator: consistency between the class, the
    ledoit_wolf()/ledoit_wolf_shrinkage() functions and ShrunkCovariance,
    for both centered (assume_centered=True) and uncentered data, plus
    1-feature, 1-sample and store_precision=False edge cases."""
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_
    score_ = lw.score(X_centered)
    # Blocked computation (block_size=6) must match the unblocked result.
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)

    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)

    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    lw = LedoitWolf()
    assert_warns(UserWarning, lw.fit, X_1sample)
    assert_array_almost_equal(lw.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
    """Reference (unblocked) Ledoit & Wolf shrinkage coefficient.

    Implements the beta/delta formulas given at the beginning of section
    3.2 of "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    Large-Dimensional Covariance Matrices", for comparison against the
    optimized blocked implementation.
    """
    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=False)

    # mu: average eigenvalue of the empirical covariance (trace / p).
    mu = np.trace(emp_cov) / n_features

    # delta: squared Frobenius distance between emp_cov and mu * identity,
    # normalized by the number of features.
    off_target = emp_cov.copy()
    off_target.flat[::n_features + 1] -= mu
    delta = (off_target ** 2).sum() / n_features

    # beta_: estimate of the error of the empirical covariance.
    squared_X = X ** 2
    beta_ = np.sum(
        np.dot(squared_X.T, squared_X) / n_samples - emp_cov ** 2
    ) / (n_features * n_samples)

    # The shrinkage intensity is capped at 1 (beta <= delta).
    beta = min(beta_, delta)
    return beta / delta
def test_ledoit_wolf_small():
    """The blocked LedoitWolf shrinkage must equal the naive reference
    implementation on a small (4-feature) problem."""
    # Compare our blocked implementation to the naive implementation
    X_small = X[:, :4]
    lw = LedoitWolf()
    lw.fit(X_small)
    shrinkage_ = lw.shrinkage_

    assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large():
    """LedoitWolf with n_features > block_size must not error, and the
    blocked result must match the unblocked one."""
    # test that ledoit_wolf doesn't error on data that is wider than block_size
    rng = np.random.RandomState(0)
    # use a number of features that is larger than the block-size
    X = rng.normal(size=(10, 20))
    lw = LedoitWolf(block_size=10).fit(X)
    # check that covariance is about diagonal (random normal noise)
    assert_almost_equal(lw.covariance_, np.eye(20), 0)
    cov = lw.covariance_

    # check that the result is consistent with not splitting data into blocks.
    lw = LedoitWolf(block_size=25).fit(X)
    assert_almost_equal(lw.covariance_, cov)
def test_oas():
    """OAS estimator: consistency between the class, the oas() function
    and ShrunkCovariance, for centered and uncentered data, plus
    1-feature, 1-sample and store_precision=False edge cases."""
    # Tests OAS module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0:1]
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)

    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)

    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    oa = OAS()
    assert_warns(UserWarning, oa.fit, X_1sample)
    assert_array_almost_equal(oa.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
| bsd-3-clause |
nuclear-wizard/moose | python/peacock/tests/postprocessor_tab/test_PostprocessorViewer.py | 12 | 4638 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import unittest
import shutil
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.PostprocessorViewer import PostprocessorViewer
from peacock.utils import Testing
import mooseutils
class TestPostprocessorViewer(Testing.PeacockImageTestCase):
    """
    Test class for the ArtistToggleWidget which toggles postprocessor lines.
    """

    #: QApplication: The main App for QT, this must be static to work correctly.
    qapp = QtWidgets.QApplication(sys.argv)

    def setUp(self):
        """
        Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
        """
        import matplotlib
        # Pin figure geometry and DPI so the rendered images are
        # reproducible for the image-comparison assertions below.
        matplotlib.rcParams["figure.figsize"] = (6.4, 4.8)
        matplotlib.rcParams["figure.dpi"] = (100)

        self._filename = "{}_test.csv".format(self.__class__.__name__)
        self._widget = PostprocessorViewer(reader=mooseutils.PostprocessorReader, timeout=None)
        self._widget.onSetFilenames([self._filename])

    def copyfiles(self):
        """
        Copy test CSV data to a temporary location and (re)load it.
        """
        shutil.copyfile('../input/white_elephant_jan_2016.csv', self._filename)
        for data in self._widget._data:
            data.load()

    def tearDown(self):
        """
        Remove temporary file created by copyfiles().
        """
        if os.path.exists(self._filename):
            os.remove(self._filename)

    def write(self, filename):
        """
        Overload the write method.
        """
        self._widget.currentWidget().OutputPlugin.write.emit(filename)

    def plot(self):
        """
        Create plot with all widgets modified.
        """
        widget = self._widget.currentWidget()

        # Plot some data
        toggle = widget.PostprocessorSelectPlugin._groups[0]._toggles['air_temp_set_1']
        toggle.CheckBox.setCheckState(QtCore.Qt.Checked)
        toggle.PlotAxis.setCurrentIndex(1)
        toggle.LineStyle.setCurrentIndex(1)
        toggle.LineWidth.setValue(5)
        # Signals are emitted manually because the state was set
        # programmatically rather than by user interaction.
        toggle.clicked.emit()

        # Add title and legend
        ax = widget.AxesSettingsPlugin
        ax.Title.setText('Snow Data')
        ax.Title.editingFinished.emit()
        ax.Legend2.setCheckState(QtCore.Qt.Checked)
        ax.Legend2.clicked.emit(True)
        ax.Legend2Location.setCurrentIndex(4)
        ax.Legend2Location.currentIndexChanged.emit(4)
        ax.onAxesModified()

        # Set limits and axis titles (y2-only)
        ax = widget.AxisTabsPlugin.Y2AxisTab
        ax.Label.setText('Air Temperature [C]')
        ax.Label.editingFinished.emit()
        ax.RangeMinimum.setText('0')
        ax.RangeMinimum.editingFinished.emit()

    def testEmpty(self):
        """
        Test that empty plot is working.
        """
        self.assertImage('testEmpty.png')

    def testWidgets(self):
        """
        Test that the widgets contained in PostprocessorPlotWidget are working.
        """
        self.copyfiles()
        self.plot()
        self.assertImage('testWidgets.png')
        self.assertFalse(self._widget.cornerWidget().CloseButton.isEnabled())
        self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results')

    def testCloneClose(self):
        """
        Test clone button works.
        """
        self.copyfiles()
        self._widget.cornerWidget().clone.emit()
        self.assertEqual(self._widget.count(), 2)
        self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results (2)')
        self.assertTrue(self._widget.cornerWidget().CloseButton.isEnabled())
        self.assertImage('testEmpty.png')

        # Plot something
        self.plot()
        self.assertImage('testWidgets.png')

        # Switch to first tab
        self._widget.setCurrentIndex(0)
        self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results')
        self.assertImage('testEmpty.png')
        self.plot()
        self.assertImage('testWidgets.png')

        # Close the first tab
        self._widget.cornerWidget().close.emit()
        self.assertEqual(self._widget.count(), 1)
        self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results (2)')
        self.assertFalse(self._widget.cornerWidget().CloseButton.isEnabled())
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
smarden1/airflow | airflow/hooks/postgres_hook.py | 3 | 2076 | import psycopg2
from airflow import settings
from airflow.utils import AirflowException
from airflow.models import Connection
class PostgresHook(object):
    '''
    Interact with Postgres.

    Connection settings may be given explicitly (host/login/psw/db/port)
    or resolved from an Airflow ``Connection`` row via ``postgres_conn_id``.
    '''
    def __init__(
            self, host=None, login=None,
            psw=None, db=None, port=None, postgres_conn_id=None):
        if not postgres_conn_id:
            # Credentials supplied directly by the caller.
            self.host = host
            self.login = login
            self.psw = psw
            self.db = db
            self.port = port
        else:
            # Look up the connection in the Airflow metadata database.
            session = settings.Session()
            try:
                db = session.query(
                    Connection).filter(
                        Connection.conn_id == postgres_conn_id)
                if db.count() == 0:
                    raise AirflowException(
                        "The postgres_dbid you provided isn't defined")
                db = db.all()[0]
                self.host = db.host
                self.login = db.login
                self.psw = db.password
                self.db = db.schema
                self.port = db.port
                session.commit()
            finally:
                # Always release the session -- the original leaked it
                # when the conn_id lookup failed.
                session.close()

    def get_conn(self):
        '''
        Open and return a new psycopg2 connection using the stored settings.
        Callers are responsible for closing it.
        '''
        conn = psycopg2.connect(
            host=self.host,
            user=self.login,
            password=self.psw,
            dbname=self.db,
            port=self.port)
        return conn

    def get_records(self, sql):
        '''
        Executes the sql and returns a set of records.

        :param sql: SQL statement to execute.
        :return: list of result tuples from ``cursor.fetchall()``.
        '''
        conn = self.get_conn()
        try:
            cur = conn.cursor()
            try:
                cur.execute(sql)
                return cur.fetchall()
            finally:
                cur.close()
        finally:
            # Close the connection even if execute/fetch raises -- the
            # original leaked both the cursor and connection on error.
            conn.close()

    def get_pandas_df(self, sql):
        '''
        Executes the sql and returns a pandas dataframe
        '''
        # Imported lazily so the hook works without pandas installed
        # unless this method is actually used.
        import pandas.io.sql as psql
        conn = self.get_conn()
        try:
            return psql.read_sql(sql, con=conn)
        finally:
            conn.close()

    def run(self, sql, autocommit=False):
        '''
        Execute *sql*; the transaction is committed explicitly unless
        ``autocommit`` is enabled on the connection.
        '''
        conn = self.get_conn()
        try:
            conn.autocommit = autocommit
            cur = conn.cursor()
            try:
                cur.execute(sql)
            finally:
                cur.close()
            conn.commit()
        finally:
            conn.close()
| apache-2.0 |
dvida/UWO-PA-Python-Course | Lecture 6/L6_lecture.py | 1 | 6176 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# Let's create some noisy data that should follow a line
# Parameters of a line
m = 2.6
k = 10.8
# Let's define a function describing a line
def line(x, m, k):
    """Evaluate the straight line y = m*x + k at *x* (scalar or ndarray)."""
    return k + x * m
# Generate the line data
x = np.linspace(0, 10, 100)
line_data = line(x, m, k)
# Add noise to the line
line_data += np.random.normal(0, 1, line_data.shape)
plt.hold(True)
# Plot the line data
plt.scatter(x, line_data)
# plt.show()
# Import needed scipy libraries
import scipy.optimize
# Fit the line
popt, pcov = scipy.optimize.curve_fit(line, x, line_data)
print('Fit params:', popt)
plt.plot(x, line(x, *popt))
# plt.show()
plt.clf()
# See the documentation how to get the stddev of each parameter
print('Stddev:', np.sqrt(np.diag(pcov)))
# CONGRATS! YOUR FIRST SUCCESSFUL LINEAR REGRESSION IN PYTHON!
# Why should we be very careful when we are doing any kind of regression:
# Anscobe's Quartet: https://en.wikipedia.org/wiki/Anscombe%27s_quartet
# What are the alternatives?
# http://scikit-learn.org/stable/auto_examples/linear_model/plot_theilsen.html
# Let's add some outliers to our data
line_data[10:12] -= 30
line_data[98:] += 50
x[-1] += 10
plt.scatter(x, line_data)
# plt.show()
# If we do the LS fit again...
popt, pconv = scipy.optimize.curve_fit(line, x, line_data)
# Plot fitted line
plt.plot(x, line(x, *popt), label='LS')
# Plot original line
plt.plot(x, line(x, m, k), color='k', label='Original')
# plt.show()
# CHECK THAT scikit-learn is installed!
from sklearn.linear_model import RANSACRegressor, TheilSenRegressor
# Reshaping the data (required by the scikit functions)
x = x.reshape(-1, 1)
line_data = line_data.reshape(-1, 1)
# RANSAC details: http://scipy-cookbook.readthedocs.io/items/RANSAC.html
# RANSAC works on non-linear problems as well, often using in Computer Vision.
# Init the RANSAC regressor
ransac = RANSACRegressor()
# Fit with RANSAC
ransac.fit(x, line_data)
# Get the fitted data result
line_ransac = ransac.predict(x)
# Show the RANSAC fit
plt.plot(x, line_ransac, color='yellow', label='RANSAC')
# plt.show()
# Theil-Sen estimator:
# General info: https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator
# Good ONLY for LINEAR REGRESSION
# Sci-kit learn implementation: http://scikit-learn.org/stable/auto_examples/linear_model/plot_theilsen.html
# Init the Theil-Sen estimator instance
theil = TheilSenRegressor()
# Fit with the Theil-Sen estimator
theil.fit(x, line_data)
# Get the fitted data result
line_theil = theil.predict(x)
# Plot Theil-Sen results
plt.plot(x, line_theil, color='red', label='Theil-Sen')
plt.legend(loc='lower right')
plt.show()
plt.clf()
###################################
# Minimization - e.g. how to find a minimum of a function?
def f1(x):
    """A tricky function to minimize: a shallow parabola plus a sine
    ripple, giving several local minima."""
    quadratic = 0.1 * x ** 2
    ripple = 2 * np.sin(2 * x)
    return quadratic + ripple
x = np.linspace(-10, 10, 100)
# Plot the tricky function
plt.plot(x, f1(x))
# Try changing the inital estimage (first try 0, then try 5)
x0 = 5
# Find the minimum using BFGS algorithm
res = scipy.optimize.minimize(f1, x0)
# Find global minimum using basin hopping
res = scipy.optimize.basinhopping(f1, x0, niter=2000)
print (res.x)
# Plot mimumum point
plt.scatter(res.x, f1(res.x))
plt.show()
plt.clf()
###################################
# Fitting nonlinear models
# Task 1
###################################
### NOT IN LECTURE
### EXTRA: ROBUST FIT attempt
# Difficult function to fit
def func(x, a, b, c, d, e):
    """Nonlinear model: a*sin(b*x) plus the quadratic c*x**2 + d*x + e."""
    # Quadratic part written in Horner form.
    polynomial = (c * x + d) * x + e
    return a * np.sin(b * x) + polynomial
x = np.linspace(0, 10, 1000)
# Generte function data
y_data = func(x, 1.5, 2, 0.1, 0.1, 3)
# Plot the model data
plt.plot(x, y_data, color='red', label='Underlying model')
# Add noise
y_data_noise = y_data + np.random.normal(0, 0.5, y_data.shape)
# Plot noisy data
plt.plot(x, y_data_noise, alpha=0.5, label='Noisy data')
# Fit the function to the noisy data, regular LS
popt, pcov = scipy.optimize.curve_fit(func, x, y_data_noise)
# Plot LS fit results
plt.plot(x, func(x, *popt), color='green', label='LS fit')
# Read more about robust regression:
# http://scipy-cookbook.readthedocs.io/items/robust_regression.html
# Define a function for computing residuals
def residuals(params, x, y):
    """ Returns the residuals between the predicted and input values of the model

    Arguments:
        params: [ndarray] function parameters
        x: [ndarray] independant variable
        y: [ndarray] prediction

    Return:
        residuals: [ndarray]
    """
    predicted = func(x, *params)
    return predicted - y
# Set initial guess of parameters to 1 (array of size 5, same as the number of parameters of our function)
x0 = np.ones(5)
# Try to do a robust fit (doesn't always work, but it is better than ordinary LS)
fit_robust_ls = scipy.optimize.least_squares(residuals, x0, loss='cauchy', f_scale=0.1, args=(x, y_data_noise))
def residuals_minimize(params, x, y):
    """ Wrapper function for calculating fit residuals for minimization.

    Folds the residual vector into a single scalar using a smooth
    approximation of the l1 (absolute value) loss.
    """
    squared = np.square(residuals(params, x, y))
    smooth_l1 = 2.0 * (np.sqrt(1.0 + squared) - 1.0)
    return np.sum(smooth_l1)
# Treat the fit as a minimization problem, but use basinhopping for minimizing residuals
fit_robust_mini = scipy.optimize.basinhopping(residuals_minimize, x0, minimizer_kwargs={'args':(x, y_data_noise)})
# Plot the robust fit results
plt.plot(x, func(x, *fit_robust_ls.x), color='yellow', label='Robust fit - least squares')
plt.plot(x, func(x, *fit_robust_mini.x), color='black', label='Robust fit - basinhopping')
plt.legend(loc='lower right')
plt.show()
# For better results, Markov-Chain Monte Carlo fitting can be used:
# https://sciencehouse.wordpress.com/2010/06/23/mcmc-and-fitting-models-to-data/
# MCMC Python implementation:
# https://github.com/dvida/mcmc-fit-py/blob/master/MCMC%20fit.py | mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tests/test_agg.py | 3 | 4896 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import io
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
from matplotlib.image import imread
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.testing.decorators import cleanup
from matplotlib import pyplot as plt
from matplotlib import collections
from matplotlib import path
@cleanup
def test_repeated_save_with_alpha():
    """Saving the same figure twice must not bleed colors from the first
    renderer into the second save."""
    # We want an image which has a background color of bluish green, with an
    # alpha of 0.25.
    fig = Figure([1, 0.4])
    canvas = FigureCanvas(fig)  # attaches the Agg canvas to the figure
    fig.set_facecolor((0, 1, 0.4))
    fig.patch.set_alpha(0.25)

    # The target color is fig.patch.get_facecolor()
    buf = io.BytesIO()
    fig.savefig(buf,
                facecolor=fig.get_facecolor(),
                edgecolor='none')

    # Save the figure again to check that the
    # colors don't bleed from the previous renderer.
    buf.seek(0)
    fig.savefig(buf,
                facecolor=fig.get_facecolor(),
                edgecolor='none')

    # Check the first pixel has the desired color & alpha
    # (approx: 0, 1.0, 0.4, 0.25)
    buf.seek(0)
    assert_array_almost_equal(tuple(imread(buf)[0, 0]),
                              (0.0, 1.0, 0.4, 0.250),
                              decimal=3)
@cleanup
def test_large_single_path_collection():
    """Regression test: a single large path in a PathCollection must not
    segfault the Agg backend."""
    buff = io.BytesIO()

    # Generates a too-large single path in a path collection that
    # would cause a segfault if the draw_markers optimization is
    # applied.
    f, ax = plt.subplots()
    collection = collections.PathCollection(
        [path.Path([[-10, 5], [10, 5], [10, -5], [-10, -5], [-10, 5]])])
    ax.add_artist(collection)
    ax.set_xlim(10**-3, 1)
    plt.savefig(buff)
def report_memory(i):
    """Print and return the resident set size (in kB, first column of
    ``ps -o rss,sz``) of the current process, tagged with iteration *i*.

    Unix-only: relies on an external ``ps`` binary.
    """
    pid = os.getpid()
    # Close the pipe deterministically instead of leaving it for the GC
    # (the original leaked the file descriptor / raised ResourceWarning).
    with os.popen('ps -p %d -o rss,sz' % pid) as pipe:
        a2 = pipe.readlines()
    print(i, '  ', a2[1], end=' ')
    return int(a2[1].split()[0])
# This test is disabled -- it uses old API. -ADS 2009-09-07
## def test_memleak():
## """Test agg backend for memory leaks."""
## from matplotlib.ft2font import FT2Font
## from numpy.random import rand
## from matplotlib.backend_bases import GraphicsContextBase
## from matplotlib.backends._backend_agg import RendererAgg
## fontname = '/usr/local/share/matplotlib/Vera.ttf'
## N = 200
## for i in range( N ):
## gc = GraphicsContextBase()
## gc.set_clip_rectangle( [20, 20, 20, 20] )
## o = RendererAgg( 400, 400, 72 )
## for j in range( 50 ):
## xs = [ 400*int(rand()) for k in range(8) ]
## ys = [ 400*int(rand()) for k in range(8) ]
## rgb = (1, 0, 0)
## pnts = zip( xs, ys )
## o.draw_polygon( gc, rgb, pnts )
## o.draw_polygon( gc, None, pnts )
## for j in range( 50 ):
## x = [ 400*int(rand()) for k in range(4) ]
## y = [ 400*int(rand()) for k in range(4) ]
## o.draw_lines( gc, x, y )
## for j in range( 50 ):
## args = [ 400*int(rand()) for k in range(4) ]
## rgb = (1, 0, 0)
## o.draw_rectangle( gc, rgb, *args )
## if 1: # add text
## font = FT2Font( fontname )
## font.clear()
## font.set_text( 'hi mom', 60 )
## font.set_size( 12, 72 )
## o.draw_text_image( font.get_image(), 30, 40, gc )
## fname = "agg_memleak_%05d.png"
## o.write_png( fname % i )
## val = report_memory( i )
## if i==1: start = val
## end = val
## avgMem = (end - start) / float(N)
## print 'Average memory consumed per loop: %1.4f\n' % (avgMem)
## #TODO: Verify the expected mem usage and approximate tolerance that
## # should be used
## #self.checkClose( 0.32, avgMem, absTol = 0.1 )
## # w/o text and w/o write_png: Average memory consumed per loop: 0.02
## # w/o text and w/ write_png : Average memory consumed per loop: 0.3400
## # w/ text and w/ write_png : Average memory consumed per loop: 0.32
@cleanup
def test_marker_with_nan():
    """Smoke test for #3722: rendering data containing NaNs must not
    segfault the Agg backend."""
    # This creates a marker with nans in it, which was segfaulting the
    # Agg backend (see #3722)
    fig, ax = plt.subplots(1)
    steps = 1000
    data = np.arange(steps)
    ax.semilogx(data)
    ax.fill_between(data, data*0.8, data*1.2)
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
@cleanup
def test_long_path():
    """Smoke test: Agg must render a very long path (70000 points)."""
    buff = io.BytesIO()

    fig, ax = plt.subplots()
    np.random.seed(0)
    points = np.random.rand(70000)
    ax.plot(points)
    fig.savefig(buff, format='png')
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
ibaidev/gplib | gplib/plot.py | 1 | 7181 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Ibai Roman
#
# This file is part of GPlib.
#
# GPlib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GPlib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GPlib. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
plt.style.use('ggplot')
class Plot(object):
    """Static plotting helpers for GPlib kernels and Gaussian processes."""

    @staticmethod
    def kernel(kernel, file_name=None):
        """
        Plot a kernel implementation: a 1-D covariance profile k(x, 0) and
        a 2-D covariance matrix k(x, x') over [-3, 3].

        :param kernel: kernel object exposing ``marginalize_covariance``.
        :param file_name: if given, save the figure there instead of
            showing it interactively.
        :return: None
        """
        plt.figure(figsize=(11, 4))
        plt.clf()
        plt.suptitle(kernel.__class__.__name__)
        plt.subplot(121)
        dist = np.linspace(0, 3, 300)[:, None]
        covar = kernel.marginalize_covariance(dist, np.array([[0.0]]))
        covar = covar.reshape(-1)
        plt.plot(dist, covar, linestyle='-', color='#1f77b4', ms=40)
        plt.xlabel("input, x")
        plt.ylabel("covariance, k(x, 0)")
        plt.subplot(122)
        x_grid = np.linspace(-3.0, 3.0, 100)[:, None]
        y_grid = np.linspace(-3.0, 3.0, 100)[:, None]
        covar = kernel.marginalize_covariance(x_grid, y_grid)
        # Flip rows so the y axis increases upward in the image.
        covar = np.flip(covar, 0)
        plt.imshow(
            covar,
            cmap='Blues',
            interpolation='none',
            extent=[-3.0, 3.0, -3.0, 3.0]
        )
        cb = plt.colorbar()
        cb.set_label("covariance, k(x, x')")
        plt.xlabel("input, x")
        plt.ylabel("input, x'")
        if file_name is None:
            plt.show()
        else:
            plt.savefig(file_name, bbox_inches='tight')
        plt.close()

    @staticmethod
    def gp_1d(gp, data, test_data=None, file_name=None, n_samples=0):
        """
        Plot gp in 1D: posterior mean, a 3-sigma band, optional posterior
        samples, training points and optional test points.

        :param gp: GP exposing ``mean_function``, ``covariance_function``
            and ``sample``.
        :param data: dict with 'X' and 'Y' arrays of training points.
        :param test_data: optional dict with 'X' and 'Y' test points.
        :param file_name: if given, save the figure there instead of
            showing it interactively.
        :param n_samples: number of posterior samples to overlay.
        :return: None
        """
        plt.figure(figsize=(5, 4))

        show_data, min_values, max_values, diff_values = \
            Plot._get_plot_bounds(data, test_data)

        plot_points = np.arange(
            min_values[0],
            max_values[0],
            diff_values[0] / 500.0
        )[:, None]

        mean = gp.mean_function.marginalize_mean(
            plot_points)
        var = gp.covariance_function.marginalize_covariance(
            plot_points, only_diagonal=True)
        # 3-sigma envelope around the posterior mean.
        sdev = 3.0 * np.sqrt(var)

        plt.clf()
        if 0 < n_samples:
            posterior_samples = gp.sample(plot_points, n_samples=n_samples)
            plt.plot(
                plot_points.flatten().tolist(),
                posterior_samples, color='#1f77b4', linewidth=0.5
            )
        plt.plot(
            plot_points.flatten().tolist(),
            mean.flatten().tolist(), color='#1f77b4', linewidth=2
        )
        ax = plt.gca()
        ax.fill_between(
            plot_points.flatten().tolist(),
            (mean - sdev).flatten().tolist(),
            (mean + sdev).flatten().tolist(),
            color='#aec7e8'
        )
        plt.plot(
            show_data['X'].flatten().tolist(),
            show_data['Y'].flatten().tolist(), color='#444444',
            linestyle='None', marker='o', markersize=4
        )
        if test_data is not None:
            plt.plot(
                test_data['X'].flatten().tolist(),
                test_data['Y'].flatten().tolist(), color='#d62728',
                linestyle='None', marker='^', markersize=4
            )
        if file_name is None:
            plt.show()
        else:
            plt.savefig(file_name, bbox_inches='tight')
        plt.close()

    @staticmethod
    def gp_2d(gp, data, test_data=None, resolution=40, file_name=None):
        """
        Plot gp in 2D: posterior mean surface over a grid, with training
        and optional test points as 3-D scatter.

        :param gp: GP exposing ``mean_function``.
        :param data: dict with 'X' (n x 2) and 'Y' arrays of training points.
        :param test_data: optional dict with 'X' and 'Y' test points.
        :param resolution: number of grid points per axis.
        :param file_name: if given, save the figure there instead of
            showing it interactively.
        :return: None
        """
        show_data, min_values, max_values, _ = \
            Plot._get_plot_bounds(data, test_data)

        # Init grid
        x_grid = np.linspace(min_values[0], max_values[0], resolution)
        y_grid = np.linspace(min_values[1], max_values[1], resolution)
        x_grid, y_grid = np.meshgrid(x_grid, y_grid)
        plot_points = np.vstack((x_grid.flatten(), y_grid.flatten())).T

        mean = gp.mean_function.marginalize_mean(
            plot_points)

        # Init plot
        fig = plt.figure()
        # add_subplot(projection=...) is the portable spelling; the
        # fig.gca(projection='3d') form was deprecated and later removed
        # in Matplotlib.
        axis = fig.add_subplot(111, projection='3d')

        # plot data
        axis.scatter(
            show_data['X'][:, 0].tolist(),
            show_data['X'][:, 1].tolist(),
            show_data['Y'].tolist(), s=20, color='#444444', marker='o'
        )

        # plot test data
        if test_data is not None:
            axis.scatter(
                test_data['X'][:, 0].tolist(),
                test_data['X'][:, 1].tolist(),
                test_data['Y'].tolist(), s=20, color='#d62728', marker='^'
            )

        # plot GP
        z_points = mean
        z_points = z_points.reshape(resolution, resolution)
        axis.plot_surface(
            x_grid, y_grid,
            z_points, rstride=1,
            cstride=1, cmap='Blues', linewidth=0,
            antialiased=False, alpha=0.3
        )
        axis.zaxis.set_major_locator(LinearLocator(10))
        axis.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

        if file_name is None:
            plt.show()
        else:
            plt.savefig(file_name, bbox_inches='tight')
        plt.close()

    @staticmethod
    def _get_plot_bounds(data, test_data=None, tail=0.5):
        """
        Compute plot bounds from the test points when available, otherwise
        from the training points, padded by *tail* times the data range on
        each side; also return the training points that fall inside.

        :return: (show_data, min_values, max_values, diff_values)
        """
        focus_data = data
        if test_data is not None:
            focus_data = test_data

        points = focus_data['X']
        min_values = np.min(points, axis=0)
        max_values = np.max(points, axis=0)
        diff_values = max_values - min_values
        # Pad the bounds by a fraction of the range on each side.
        min_values -= diff_values * tail
        max_values += diff_values * tail
        diff_values = max_values - min_values

        # Keep only training points that lie inside the padded bounds.
        show_i = np.logical_and(
            np.all(min_values < data['X'], axis=1),
            np.all(data['X'] < max_values, axis=1)
        )
        show_data = {
            'X': data['X'][show_i],
            'Y': data['Y'][show_i]
        }

        return show_data, min_values, max_values, diff_values
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/multiple_yaxis_with_spines.py | 6 | 1582 | import matplotlib.pyplot as plt
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.itervalues():
sp.set_visible(False)
fig, host = plt.subplots()
fig.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
par2.spines["right"].set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(par2)
# Second, show the right spine.
par2.spines["right"].set_visible(True)
p1, = host.plot([0, 1, 2], [0, 1, 2], "b-", label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-", label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-", label="Velocity")
host.set_xlim(0, 2)
host.set_ylim(0, 2)
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
host.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
host.tick_params(axis='x', **tkw)
lines = [p1, p2, p3]
host.legend(lines, [l.get_label() for l in lines])
plt.show()
| mit |
rkmaddox/expyfun | doc/conf.py | 1 | 10310 | # -*- coding: utf-8 -*-
#
# Expyfun documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
from os.path import relpath, dirname
import sys
from datetime import date
import sphinx_gallery # noqa
import sphinx_bootstrap_theme
from numpydoc import numpydoc, docscrape # noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'expyfun')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
import expyfun
if not os.path.isdir('_images'):
os.mkdir('_images')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx_gallery.gen_gallery',
'numpydoc',
]
autosummary_generate = True
autodoc_default_flags = ['inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'expyfun'
td = date.today()
copyright = u'2013-%s, expyfun developers. Last updated on %s' % (td.year,
td.isoformat())
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = expyfun.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_patterns = ['source/generated']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['expyfun.']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': 'expyfun',
'source_link_position': "nav", # default
'bootswatch_theme': "yeti",
'navbar_sidebarrel': False, # Render the next/prev links in navbar?
'navbar_pagenav': False,
'navbar_class': "navbar",
'bootstrap_version': "3", # default
'navbar_links': [
("API reference", "python_reference"),
("Examples", "auto_examples/index"),
("Whats new", "whats_new"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/favicon.ico"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'expyfun-doc'
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'numpy': ('https://docs.scipy.org/doc/numpy-dev/', None),
'scipy': ('https://scipy.github.io/devdocs/', None),
'sklearn': ('http://scikit-learn.org/stable/', None),
'matplotlib': ('http://matplotlib.org/', None),
}
examples_dirs = ['../examples']
gallery_dirs = ['auto_examples']
sphinx_gallery_conf = {
'doc_module': ('expyfun',),
'reference_url': {
'expyfun': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy/reference',
'scipy': 'http://docs.scipy.org/doc/scipy/reference',
'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
'examples_dirs': examples_dirs,
'gallery_dirs': gallery_dirs,
'find_mayavi_figures': False,
'default_thumb_file': os.path.join('_static', 'favicon.ico'),
'backreferences_dir': 'generated',
'filename_pattern': '/.*(?<!_)\.py$', # anything that isn't *_.py
}
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Source code links (adapted from SciPy (doc/source/conf.py))
# -----------------------------------------------------------------------------
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(expyfun.__file__))
if 'dev' in expyfun.__version__:
return "https://github.com/LABSN/expyfun/blob/master/expyfun/%s%s" % ( # noqa
fn, linespec)
else:
return "https://github.com/LABSN/expyfun/blob/maint/%s/expyfun/%s%s" % ( # noqa
expyfun.__version__, fn, linespec)
| bsd-3-clause |
wsmorgan/782 | docs/conf.py | 1 | 10259 | # -*- coding: utf-8 -*-
#
# basis documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 27 09:52:04 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'basis'
copyright = u'2016, Wiley Morgan'
author = u'Wiley Morgan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0'
# The full version, including alpha/beta/rc tags.
release = u'0.0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'basis v0.0.5'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'basisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'basis.tex', u'basis Documentation',
u'Wiley Morgan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'basis', u'basis Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'basis', u'basis Documentation',
author, 'basis', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'matplotlib': ('http://matplotlib.org/', None)}
| mit |
Avsecz/concise | concise/utils/plot.py | 1 | 9272 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
from concise.preprocessing.sequence import DNA, RNA, AMINO_ACIDS
from concise.utils.letters import all_letters
from collections import OrderedDict
from matplotlib import pyplot
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from descartes.patch import Polygon, PolygonPath, PolygonPatch
from shapely.wkt import loads as load_wkt
from shapely import affinity
import re
def heatmap(w, vmin=None, vmax=None, diverge_color=False,
ncol=1,
plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)):
"""Plot a heatmap from weight matrix w
vmin, vmax = z axis range
diverge_color = Should we use diverging colors?
plot_name = plot_title
vocab = vocabulary (corresponds to the first axis)
"""
# Generate y and x values from the dimension lengths
assert len(vocab) == w.shape[0]
plt_y = np.arange(w.shape[0] + 1) + 0.5
plt_x = np.arange(w.shape[1] + 1) - 0.5
z_min = w.min()
z_max = w.max()
if vmin is None:
vmin = z_min
if vmax is None:
vmax = z_max
if diverge_color:
color_map = plt.cm.RdBu
else:
color_map = plt.cm.Blues
fig = plt.figure(figsize=figsize)
# multiple axis
if len(w.shape) == 3:
#
n_plots = w.shape[2]
nrow = math.ceil(n_plots / ncol)
else:
n_plots = 1
nrow = 1
ncol = 1
for i in range(n_plots):
if len(w.shape) == 3:
w_cur = w[:, :, i]
else:
w_cur = w
ax = plt.subplot(nrow, ncol, i + 1)
plt.tight_layout()
im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map,
vmin=vmin, vmax=vmax, edgecolors="white")
ax.grid(False)
ax.set_yticklabels([""] + vocab, minor=False)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xticks(np.arange(w_cur.shape[1] + 1))
ax.set_xlim(plt_x.min(), plt_x.max())
ax.set_ylim(plt_y.min(), plt_y.max())
# nice scale location:
# http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax)
if plot_name is not None:
if n_plots > 0:
pln = plot_name + " {0}".format(i)
else:
pln = plot_name
ax.set_title(pln)
ax.set_aspect('equal')
return fig
# -----------------------------------------------------------------------
#
#
# Code adoped from https://github.com/kundajelab/dragonn, (c) 2016 Kundaje Lab
def standardize_polygons_str(data_str):
"""Given a POLYGON string, standardize the coordinates to a 1x1 grid.
Input : data_str (taken from above)
Output: tuple of polygon objects
"""
# find all of the polygons in the letter (for instance an A
# needs to be constructed from 2 polygons)
path_strs = re.findall("\(\(([^\)]+?)\)\)", data_str.strip())
# convert the data into a numpy array
polygons_data = []
for path_str in path_strs:
data = np.array([
tuple(map(float, x.split())) for x in path_str.strip().split(",")])
polygons_data.append(data)
# standardize the coordinates
min_coords = np.vstack(data.min(0) for data in polygons_data).min(0)
max_coords = np.vstack(data.max(0) for data in polygons_data).max(0)
for data in polygons_data:
data[:, ] -= min_coords
data[:, ] /= (max_coords - min_coords)
polygons = []
for data in polygons_data:
polygons.append(load_wkt(
"POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))
return tuple(polygons)
# ----------------------
letter_polygons = {k: standardize_polygons_str(v) for k, v in all_letters.items()}
VOCABS = {"DNA": OrderedDict([("A", "green"),
("C", "blue"),
("G", "orange"),
("T", "red")]),
"RNA": OrderedDict([("A", "green"),
("C", "blue"),
("G", "orange"),
("U", "red")]),
"AA": OrderedDict([('A', '#CCFF00'),
('B', "orange"),
('C', '#FFFF00'),
('D', '#FF0000'),
('E', '#FF0066'),
('F', '#00FF66'),
('G', '#FF9900'),
('H', '#0066FF'),
('I', '#66FF00'),
('K', '#6600FF'),
('L', '#33FF00'),
('M', '#00FF00'),
('N', '#CC00FF'),
('P', '#FFCC00'),
('Q', '#FF00CC'),
('R', '#0000FF'),
('S', '#FF3300'),
('T', '#FF6600'),
('V', '#99FF00'),
('W', '#00CCFF'),
('Y', '#00FFCC'),
('Z', 'blue')]),
"RNAStruct": OrderedDict([("P", "red"),
("H", "green"),
("I", "blue"),
("M", "orange"),
("E", "violet")]),
}
# make sure things are in order
VOCABS["AA"] = OrderedDict((k, VOCABS["AA"][k]) for k in AMINO_ACIDS)
VOCABS["DNA"] = OrderedDict((k, VOCABS["DNA"][k]) for k in DNA)
VOCABS["RNA"] = OrderedDict((k, VOCABS["RNA"][k]) for k in RNA)
def add_letter_to_axis(ax, let, col, x, y, height):
"""Add 'let' with position x,y and height height to matplotlib axis 'ax'.
"""
if len(let) == 2:
colors = [col, "white"]
elif len(let) == 1:
colors = [col]
else:
raise ValueError("3 or more Polygons are not supported")
for polygon, color in zip(let, colors):
new_polygon = affinity.scale(
polygon, yfact=height, origin=(0, 0, 0))
new_polygon = affinity.translate(
new_polygon, xoff=x, yoff=y)
patch = PolygonPatch(
new_polygon, edgecolor=color, facecolor=color)
ax.add_patch(patch)
return
# TODO - add figsize???
def seqlogo(letter_heights, vocab="DNA", ax=None):
"""Make a logo plot
# Arguments
letter_heights: "motif length" x "vocabulary size" numpy array
Can also contain negative values.
vocab: str, Vocabulary name. Can be: DNA, RNA, AA, RNAStruct.
ax: matplotlib axis
"""
ax = ax or plt.gca()
assert letter_heights.shape[1] == len(VOCABS[vocab])
x_range = [1, letter_heights.shape[0]]
pos_heights = np.copy(letter_heights)
pos_heights[letter_heights < 0] = 0
neg_heights = np.copy(letter_heights)
neg_heights[letter_heights > 0] = 0
for x_pos, heights in enumerate(letter_heights):
letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys())))
y_pos_pos = 0.0
y_neg_pos = 0.0
for height, letter in letters_and_heights:
color = VOCABS[vocab][letter]
polygons = letter_polygons[letter]
if height > 0:
add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height)
y_pos_pos += height
else:
add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height)
y_neg_pos += height
# if add_hline:
# ax.axhline(color="black", linewidth=1)
ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
ax.grid(False)
ax.set_xticks(list(range(*x_range)) + [x_range[-1]])
ax.set_aspect(aspect='auto', adjustable='box')
ax.autoscale_view()
def seqlogo_fig(letter_heights, vocab="DNA", figsize=(10, 2), ncol=1, plot_name=None):
"""
# Arguments
plot_name: Title of the plot. Can be a list of names
"""
fig = plt.figure(figsize=figsize)
if len(letter_heights.shape) == 3:
#
n_plots = letter_heights.shape[2]
nrow = math.ceil(n_plots / ncol)
if isinstance(plot_name, list):
assert len(plot_name) == n_plots
else:
n_plots = 1
nrow = 1
ncol = 1
for i in range(n_plots):
if len(letter_heights.shape) == 3:
w_cur = letter_heights[:, :, i]
else:
w_cur = letter_heights
ax = plt.subplot(nrow, ncol, i + 1)
plt.tight_layout()
# plot the motif
seqlogo(w_cur, vocab, ax)
# add the title
if plot_name is not None:
if n_plots > 0:
if isinstance(plot_name, list):
pln = plot_name[i]
else:
pln = plot_name + " {0}".format(i)
else:
pln = plot_name
ax.set_title(pln)
return fig
| mit |
planetarymike/IDL-Colorbars | IDL_py_test/093_Multihue_Red1.py | 1 | 8802 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0.0000208612, 0.0000200049, 0.0000198463],
[0.00038926, 0.000346551, 0.000249987],
[0.00114175, 0.000989297, 0.000648277],
[0.00225249, 0.00190778, 0.00116452],
[0.003715, 0.00307964, 0.001769],
[0.00552993, 0.00448905, 0.00244083],
[0.00770176, 0.00612344, 0.00316415],
[0.0102374, 0.00797218, 0.00392641],
[0.0131454, 0.0100259, 0.00471743],
[0.0164356, 0.0122762, 0.00552882],
[0.0201188, 0.014715, 0.00635364],
[0.0242069, 0.0173349, 0.00718612],
[0.0287119, 0.0201289, 0.00802144],
[0.0336469, 0.0230901, 0.00885566],
[0.0390251, 0.0262119, 0.00968554],
[0.0446557, 0.0294878, 0.0105085],
[0.0502698, 0.0329116, 0.0113226],
[0.0558811, 0.0364772, 0.0121263],
[0.0614933, 0.0401784, 0.0129186],
[0.0671096, 0.0438637, 0.013699],
[0.0727328, 0.0474728, 0.0144673],
[0.078365, 0.0510125, 0.0152236],
[0.0840083, 0.0544854, 0.0159685],
[0.0896641, 0.0578936, 0.0167028],
[0.0953339, 0.0612394, 0.0174278],
[0.101019, 0.0645243, 0.0181449],
[0.10672, 0.0677503, 0.0188557],
[0.112437, 0.0709189, 0.0195623],
[0.118172, 0.0740314, 0.0202669],
[0.123924, 0.0770894, 0.020972],
[0.129695, 0.0800939, 0.0216802],
[0.135483, 0.0830463, 0.0223945],
[0.14129, 0.0859477, 0.023118],
[0.147115, 0.0887992, 0.0238541],
[0.152959, 0.0916017, 0.0246065],
[0.15882, 0.0943563, 0.0253788],
[0.164699, 0.097064, 0.0261753],
[0.170595, 0.0997257, 0.027],
[0.176509, 0.102342, 0.0278576],
[0.18244, 0.104915, 0.0287527],
[0.188387, 0.107444, 0.0296904],
[0.194349, 0.10993, 0.0306757],
[0.200327, 0.112376, 0.0317143],
[0.20632, 0.11478, 0.0328117],
[0.212327, 0.117145, 0.033974],
[0.218347, 0.11947, 0.0352076],
[0.22438, 0.121758, 0.0365188],
[0.230425, 0.124008, 0.0379148],
[0.236482, 0.126222, 0.0394026],
[0.24255, 0.128401, 0.0409786],
[0.248627, 0.130545, 0.0426123],
[0.254714, 0.132655, 0.0443161],
[0.260808, 0.134733, 0.0460927],
[0.266911, 0.136779, 0.0479446],
[0.27302, 0.138795, 0.0498741],
[0.279135, 0.14078, 0.0518833],
[0.285255, 0.142737, 0.0539742],
[0.291379, 0.144666, 0.0561487],
[0.297506, 0.146568, 0.0584083],
[0.303635, 0.148445, 0.0607546],
[0.309766, 0.150297, 0.0631889],
[0.315897, 0.152125, 0.0657126],
[0.322027, 0.153932, 0.0683267],
[0.328156, 0.155717, 0.0710322],
[0.334282, 0.157482, 0.0738301],
[0.340405, 0.159229, 0.0767211],
[0.346523, 0.160958, 0.079706],
[0.352636, 0.162671, 0.0827854],
[0.358742, 0.164369, 0.0859598],
[0.36484, 0.166053, 0.0892299],
[0.37093, 0.167726, 0.0925959],
[0.37701, 0.169387, 0.0960584],
[0.383079, 0.171039, 0.0996175],
[0.389137, 0.172684, 0.103274],
[0.395182, 0.174321, 0.107027],
[0.401212, 0.175954, 0.110878],
[0.407228, 0.177584, 0.114826],
[0.413227, 0.179211, 0.118872],
[0.419209, 0.180838, 0.123015],
[0.425173, 0.182467, 0.127257],
[0.431117, 0.184098, 0.131596],
[0.43704, 0.185734, 0.136032],
[0.442942, 0.187376, 0.140567],
[0.448821, 0.189027, 0.145198],
[0.454676, 0.190686, 0.149927],
[0.460505, 0.192358, 0.154752],
[0.466309, 0.194042, 0.159674],
[0.472084, 0.195741, 0.164693],
[0.477832, 0.197457, 0.169807],
[0.483549, 0.199192, 0.175017],
[0.489236, 0.200946, 0.180322],
[0.49489, 0.202723, 0.185721],
[0.500511, 0.204523, 0.191213],
[0.506098, 0.206349, 0.196799],
[0.511649, 0.208203, 0.202476],
[0.517163, 0.210085, 0.208245],
[0.52264, 0.211999, 0.214104],
[0.528077, 0.213945, 0.220053],
[0.533475, 0.215926, 0.22609],
[0.538831, 0.217943, 0.232214],
[0.544144, 0.219998, 0.238423],
[0.549414, 0.222093, 0.244718],
[0.554639, 0.224229, 0.251095],
[0.559819, 0.226408, 0.257554],
[0.564951, 0.228631, 0.264092],
[0.570036, 0.230901, 0.270709],
[0.575071, 0.233219, 0.277403],
[0.580056, 0.235586, 0.28417],
[0.58499, 0.238004, 0.291011],
[0.589871, 0.240473, 0.297922],
[0.594699, 0.242997, 0.304901],
[0.599473, 0.245575, 0.311947],
[0.604191, 0.24821, 0.319056],
[0.608853, 0.250901, 0.326227],
[0.613457, 0.253652, 0.333456],
[0.618003, 0.256462, 0.340742],
[0.62249, 0.259333, 0.348082],
[0.626917, 0.262265, 0.355472],
[0.631283, 0.26526, 0.36291],
[0.635587, 0.268318, 0.370393],
[0.639829, 0.271441, 0.377918],
[0.644007, 0.274629, 0.385481],
[0.648121, 0.277882, 0.393081],
[0.65217, 0.281201, 0.400713],
[0.656154, 0.284588, 0.408375],
[0.660071, 0.288041, 0.416062],
[0.663922, 0.291561, 0.423772],
[0.667705, 0.29515, 0.431501],
[0.671421, 0.298806, 0.439246],
[0.675068, 0.302531, 0.447002],
[0.678647, 0.306324, 0.454768],
[0.682156, 0.310184, 0.462538],
[0.685596, 0.314113, 0.47031],
[0.688967, 0.31811, 0.478079],
[0.692267, 0.322175, 0.485842],
[0.695498, 0.326307, 0.493596],
[0.698658, 0.330506, 0.501337],
[0.701748, 0.334771, 0.509061],
[0.704768, 0.339103, 0.516765],
[0.707718, 0.3435, 0.524444],
[0.710597, 0.347962, 0.532096],
[0.713407, 0.352489, 0.539717],
[0.716148, 0.357078, 0.547303],
[0.718819, 0.36173, 0.554851],
[0.721422, 0.366444, 0.562358],
[0.723956, 0.371219, 0.569819],
[0.726423, 0.376053, 0.577232],
[0.728823, 0.380946, 0.584594],
[0.731156, 0.385896, 0.591902],
[0.733424, 0.390903, 0.599152],
[0.735627, 0.395965, 0.606341],
[0.737767, 0.401081, 0.613466],
[0.739844, 0.40625, 0.620525],
[0.741859, 0.41147, 0.627515],
[0.743814, 0.41674, 0.634434],
[0.74571, 0.422058, 0.641278],
[0.747548, 0.427423, 0.648045],
[0.749329, 0.432834, 0.654734],
[0.751056, 0.43829, 0.661341],
[0.75273, 0.443787, 0.667865],
[0.754352, 0.449326, 0.674305],
[0.755924, 0.454904, 0.680657],
[0.757448, 0.46052, 0.686921],
[0.758926, 0.466173, 0.693095],
[0.76036, 0.47186, 0.699178],
[0.761752, 0.47758, 0.705167],
[0.763103, 0.483332, 0.711063],
[0.764418, 0.489114, 0.716864],
[0.765696, 0.494924, 0.722569],
[0.766942, 0.500761, 0.728178],
[0.768157, 0.506622, 0.733689],
[0.769343, 0.512507, 0.739103],
[0.770504, 0.518414, 0.744419],
[0.771642, 0.524342, 0.749637],
[0.772759, 0.530288, 0.754756],
[0.773859, 0.536251, 0.759778],
[0.774943, 0.54223, 0.764701],
[0.776016, 0.548223, 0.769527],
[0.777079, 0.554228, 0.774255],
[0.778135, 0.560245, 0.778886],
[0.779187, 0.566272, 0.783422],
[0.780239, 0.572307, 0.787862],
[0.781293, 0.578349, 0.792207],
[0.782353, 0.584397, 0.796459],
[0.78342, 0.59045, 0.800619],
[0.784499, 0.596505, 0.804687],
[0.785592, 0.602562, 0.808666],
[0.786702, 0.60862, 0.812556],
[0.787832, 0.614678, 0.816359],
[0.788985, 0.620734, 0.820076],
[0.790165, 0.626787, 0.82371],
[0.791373, 0.632837, 0.827261],
[0.792614, 0.638882, 0.830732],
[0.79389, 0.644921, 0.834124],
[0.795204, 0.650954, 0.83744],
[0.796558, 0.656979, 0.840682],
[0.797956, 0.662996, 0.843851],
[0.7994, 0.669004, 0.846949],
[0.800893, 0.675002, 0.849979],
[0.802438, 0.680989, 0.852943],
[0.804037, 0.686966, 0.855844],
[0.805692, 0.69293, 0.858683],
[0.807407, 0.698882, 0.861463],
[0.809183, 0.704821, 0.864186],
[0.811023, 0.710746, 0.866854],
[0.812929, 0.716658, 0.869471],
[0.814904, 0.722555, 0.872038],
[0.816948, 0.728437, 0.874559],
[0.819064, 0.734305, 0.877034],
[0.821255, 0.740157, 0.879468],
[0.823521, 0.745993, 0.881862],
[0.825864, 0.751813, 0.88422],
[0.828286, 0.757617, 0.886542],
[0.830789, 0.763405, 0.888833],
[0.833373, 0.769177, 0.891095],
[0.836041, 0.774932, 0.89333],
[0.838792, 0.78067, 0.895541],
[0.841629, 0.786392, 0.89773],
[0.844551, 0.792097, 0.899899],
[0.847561, 0.797786, 0.902053],
[0.850658, 0.803458, 0.904192],
[0.853844, 0.809114, 0.906319],
[0.857118, 0.814754, 0.908438],
[0.860481, 0.820377, 0.91055],
[0.863934, 0.825985, 0.912658],
[0.867476, 0.831577, 0.914765],
[0.871108, 0.837154, 0.916872],
[0.87483, 0.842715, 0.918983],
[0.878641, 0.848261, 0.9211],
[0.882542, 0.853793, 0.923225],
[0.886532, 0.859311, 0.925361],
[0.890611, 0.864814, 0.92751],
[0.894779, 0.870304, 0.929675],
[0.899034, 0.875781, 0.931858],
[0.903376, 0.881245, 0.934061],
[0.907805, 0.886697, 0.936287],
[0.91232, 0.892137, 0.938539],
[0.916919, 0.897565, 0.940818],
[0.921603, 0.902983, 0.943128],
[0.926369, 0.90839, 0.945471],
[0.931216, 0.913787, 0.94785],
[0.936144, 0.919176, 0.950266],
[0.941152, 0.924555, 0.952724],
[0.946237, 0.929926, 0.955225],
[0.951398, 0.93529, 0.957773],
[0.956633, 0.940647, 0.960372],
[0.961942, 0.945998, 0.963024],
[0.967321, 0.951343, 0.965733],
[0.972768, 0.956684, 0.968504],
[0.978282, 0.96202, 0.971342],
[0.983859, 0.967353, 0.974254],
[0.989496, 0.972684, 0.977248],
[0.995188, 0.978013, 0.980335],
[1., 0.983342, 0.983535],
[1., 0.988671, 0.986883],
[1., 0.994001, 0.990529]]
# Build the matplotlib colormap object from the 256-entry RGB table above.
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
    # Preview the colormap: use the viscm viewer when it is installed,
    # otherwise fall back to rendering a plain horizontal gradient.
    import matplotlib.pyplot as plt
    import numpy as np
    try:
        from pycam02ucs.cm.viscm import viscm
        viscm(test_cm)
    except ImportError:
        print("pycam02ucs not found, falling back on simple display")
        gradient = np.linspace(0, 100, 256)[None, :]
        plt.imshow(gradient, aspect='auto', cmap=test_cm)
    plt.show()
| gpl-2.0 |
pprett/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)

# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
#         Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier

n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.

# First 2000 samples are used for training, the remaining 10000 for testing.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]

# Baseline 1: a single decision stump (depth-1 tree).
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)

# Baseline 2: a single depth-9 decision tree.
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)

# Discrete AdaBoost (SAMME) boosts stumps using predicted class labels.
ada_discrete = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME")
ada_discrete.fit(X_train, y_train)

# Real AdaBoost (SAMME.R) boosts stumps using predicted class probabilities.
ada_real = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME.R")
ada_real.fit(X_train, y_train)

fig = plt.figure()
ax = fig.add_subplot(111)

# Horizontal reference lines for the two single-tree baselines.
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
        label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
        label='Decision Tree Error')

# staged_predict yields the ensemble prediction after each boosting
# iteration, giving the error as a function of the number of estimators.
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
    ada_discrete_err[i] = zero_one_loss(y_pred, y_test)

ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
    ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)

ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
    ada_real_err[i] = zero_one_loss(y_pred, y_test)

ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
    ada_real_err_train[i] = zero_one_loss(y_pred, y_train)

ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
        label='Discrete AdaBoost Test Error',
        color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
        label='Discrete AdaBoost Train Error',
        color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
        label='Real AdaBoost Test Error',
        color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
        label='Real AdaBoost Train Error',
        color='green')

ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')

leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)

plt.show()
| bsd-3-clause |
ndingwall/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 23 | 4413 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np

print(__doc__)

# Number of random trials
NUM_TRIALS = 30

# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target

# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
          "gamma": [.01, .1]}

# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")

# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)

# Loop for each trial
for i in range(NUM_TRIALS):

    # Choose cross-validation techniques for the inner and outer loops,
    # independently of the dataset.
    # E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
    # random_state=i makes each trial a different shuffle but reproducible.
    inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
    outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)

    # Non_nested parameter search and scoring
    clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
    clf.fit(X_iris, y_iris)
    # best_score_ is the best mean inner-CV score, i.e. the (optimistically
    # biased) non-nested estimate.
    non_nested_scores[i] = clf.best_score_

    # Nested CV with parameter optimization
    # cross_val_score re-fits the whole grid search on each outer training
    # split, so the outer test folds never influence parameter selection.
    nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
    nested_scores[i] = nested_score.mean()

score_difference = non_nested_scores - nested_scores

print("Average difference of {:6f} with std. dev. of {:6f}."
      .format(score_difference.mean(), score_difference.std()))

# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
           ["Non-Nested CV", "Nested CV"],
           bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
          x=.5, y=1.1, fontsize="15")

# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
           ["Non-Nested CV - Nested CV Score"],
           bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")

plt.show()
| bsd-3-clause |
Windy-Ground/scikit-learn | sklearn/linear_model/sag.py | 64 | 9815 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..utils import ConvergenceWarning
from ..utils import check_array
from .base import make_dataset
from .sgd_fast import Log, SquaredLoss
from .sag_fast import sag, get_max_squared_sum
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
    """Return the automatic step size for the SAG solver.

    The step size is the inverse of (an upper bound on) the Lipschitz
    constant of the gradient, built from the largest squared row norm of X,
    the intercept term, and the scaled regularization constant.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Regularization constant, already scaled by 1. / n_samples.

    loss : string, in {"log", "squared"}
        The loss function used in SAG solver.

    fit_intercept : bool
        Whether an intercept (bias) term is fitted; adds one to the bound.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    Raises
    ------
    ValueError
        If ``loss`` is neither ``'log'`` nor ``'squared'``.
    """
    base = max_squared_sum + int(fit_intercept)
    if loss == 'log':
        # inverse Lipschitz constant for log loss
        return 4.0 / (base + 4.0 * alpha_scaled)
    if loss == 'squared':
        # inverse Lipschitz constant for squared loss
        return 1.0 / (base + alpha_scaled)
    raise ValueError("Unknown loss function for SAG solver, got %s "
                     "instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None):
    """SAG solver for Ridge and LogisticRegression

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).

    loss : 'log' | 'squared'
        Loss function that will be optimized.
        'log' is used for classification, like in LogisticRegression.
        'squared' is used for regression, like in Ridge.

    alpha : float, optional
        Constant that multiplies the regularization term. Defaults to 1.

    max_iter: int, optional
        The max number of passes over the training data if the stopping
        criterea is not reached. Defaults to 1000.

    tol: double, optional
        The stopping criterea for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001

    verbose: integer, optional
        The verbosity level.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem: dict, optional
        The initialization parameters used for warm starting. It is currently
        not used in Ridge. None (the default) is treated as an empty dict,
        i.e. a cold start.

    Returns
    -------
    coef_ : array, shape (n_features)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and eventually the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> X = np.random.randn(n_samples, n_features)
    >>> y = np.random.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='sag', tol=0.001)

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    LogisticRegression(C=1.0, class_weight=None, dual=False,
        fit_intercept=True, intercept_scaling=1, max_iter=100,
        multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
        solver='sag', tol=0.0001, verbose=0, warm_start=False)

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf

    See also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    # BUG FIX: the default was the mutable ``warm_start_mem=dict()``, a
    # single dict shared across all calls (B006). It was only read here, but
    # a mutable default is a latent aliasing bug; use a None sentinel.
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples

    # initialization: default every warm-start quantity the caller did not
    # supply ('x in dict' is the idiomatic form of 'x in dict.keys()').
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float64, order='C')

    if 'coef' in warm_start_mem:
        coef_init = warm_start_mem['coef']
    else:
        coef_init = np.zeros(n_features, dtype=np.float64, order='C')

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.size == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1]
        coef_init = coef_init[:-1]
    else:
        intercept_init = 0.0

    if 'intercept_sum_gradient' in warm_start_mem:
        intercept_sum_gradient_init = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient_init = 0.0

    if 'gradient_memory' in warm_start_mem:
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros(n_samples, dtype=np.float64,
                                        order='C')

    if 'sum_gradient' in warm_start_mem:
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros(n_features, dtype=np.float64, order='C')

    if 'seen' in warm_start_mem:
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')

    if 'num_seen' in warm_start_mem:
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = get_max_squared_sum(X)
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept)

    # NOTE(review): presumably guards a division by (1 - step_size *
    # alpha_scaled) inside the Cython `sag` routine — confirm there.
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")

    if loss == 'log':
        class_loss = Log()
    elif loss == 'squared':
        class_loss = SquaredLoss()
    else:
        raise ValueError("Invalid loss parameter: got %r instead of "
                         "one of ('log', 'squared')" % loss)

    intercept_, num_seen, n_iter_, intercept_sum_gradient = \
        sag(dataset, coef_init.ravel(),
            intercept_init, n_samples,
            n_features, tol,
            max_iter,
            class_loss,
            step_size, alpha_scaled,
            sum_gradient_init.ravel(),
            gradient_memory_init.ravel(),
            seen_init.ravel(),
            num_seen_init,
            fit_intercept,
            intercept_sum_gradient_init,
            intercept_decay,
            verbose)

    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)

    coef_ = coef_init
    if fit_intercept:
        # Store the fitted intercept at the end of the coefficient vector,
        # mirroring the warm-start layout expected on input.
        coef_ = np.append(coef_, intercept_)

    warm_start_mem = {'coef': coef_, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}

    return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
shusenl/scikit-learn | sklearn/linear_model/coordinate_descent.py | 59 | 76336 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty.  For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray, shape (n_alphas,)
        Grid of alpha values, in decreasing order.
    """
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        # X^T y was not precomputed: center/normalize (dense only) and
        # compute it here.
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = center_data(X, y, fit_intercept,
                                        normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
                                                        normalize)
            mean_dot = X_mean * np.sum(y)

    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Apply centering/normalization to the precomputed X^T y instead of
        # to the sparse X itself, preserving sparsity of X.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_std[:, np.newaxis]

    # Largest alpha of the grid, proportional to the maximum row norm of
    # X^T y scaled by n_samples and the L1 mixing ratio.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (X^T y numerically zero): return a constant grid
        # at float resolution, avoiding log10(0) below.
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas
    # Log-spaced grid from alpha_max * eps up to alpha_max, reversed so the
    # path starts at the strongest regularization.
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The Lasso is the elastic net with l1_ratio fixed at 1.
    # BUG FIX: ``return_n_iter`` was accepted but never forwarded, so callers
    # asking for the iteration counts silently did not get them; pass it on.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False, **params):
    """Compute elastic net path with coordinate descent
    The elastic net optimization function varies for mono and multi-outputs.
    For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.
    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values
    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    params : kwargs
        keyword arguments passed to the coordinate descent solver.
    return_n_iter : bool
        whether to return the number of iterations or not.
    positive : bool, default False
        If set to True, forces coefficients to be positive.
    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.
    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.
    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).
    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.
    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    check_input = 'check_input' not in params or params['check_input']
    # NOTE(review): unlike the line above, this reads params['pre_fit']
    # whenever 'check_input' IS present -- a caller passing check_input
    # without pre_fit would get a KeyError.  The internal caller
    # (ElasticNet.fit) always passes both together; confirm before calling
    # with only one of them.
    pre_fit = 'check_input' not in params or params['pre_fit']
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
                             copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape
    # A 2-D y selects the multi-task solver below.
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape
    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)
    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if pre_fit:
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False,
                     copy=False, Xy_precompute_order='F')
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []
    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')
    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)
    if coef_init is None:
        # Warm-start each alpha from the previous solution (coef_ is reused
        # across iterations of the loop below).
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)
    for i, alpha in enumerate(alphas):
        # The cd_fast solvers expect the penalties pre-scaled by n_samples.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the appropriate Cython solver: sparse, multi-task,
        # precomputed-Gram, or plain dense coordinate descent.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, 'csc', dtype=np.float64,
                                         order='F')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        # A dual gap above the solver's threshold means max_iter was hit
        # before reaching the requested tolerance.
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::
        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * L1 + b * L2
    where::
        alpha = a + b and l1_ratio = a / (a + b)
    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.
    max_iter : int, optional
        The maximum number of iterations
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # The path function used by fit(); subclasses (e.g. Lasso) reuse it.
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ is None until fit() runs; warm_start checks this.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.
        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target
        check_input : boolean, default True
            If ``False``, skip input validation; X and y are then expected
            to be float64 and Fortran-contiguous already (internal use).
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)
        # We expect X and y to be already float64 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
        # Center/normalize once here; the path is then called with
        # fit_intercept=False / normalize=False and pre_fit=False below.
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False, Xy_precompute_order='F')
        # Normalize shapes so the per-target loop below works uniformly.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            # Warm start: reuse the previous solution as initialization.
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        self.n_iter_ = []
        # Solve one single-alpha path per target column.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False,
                          pre_fit=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        # Mono-target: expose scalars/1-D arrays rather than length-1 lists.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self
    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)
    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        return self._decision_function(X)
    def _decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # Sparse X needs an explicit dot to avoid densifying the input.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically this is the Elastic Net objective with ``l1_ratio=1.0``
    (i.e. no L2 penalty at all), and the implementation simply delegates
    to :class:`ElasticNet` with that fixed mixing parameter.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical reasons,
        using ``alpha = 0`` with the Lasso object is not advised and you
        should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Same path function as the parent class: Lasso is just the
    # l1_ratio=1.0 special case of the elastic net path.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Forward everything to ElasticNet, pinning the mix to pure L1.
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol,
            warm_start=warm_start, positive=positive,
            random_state=random_state, selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path.  See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    Returns
    -------
    this_mses : ndarray, shape (n_alphas,)
        Test-set mean squared error for each alpha on the path.
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Center/normalize on the training fold only; the resulting means and
    # scales are reused below to score the (untouched) test fold.
    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Copy before mutating so the caller's path_params dict is untouched;
    # the precomputed quantities are smuggled to the path via **params.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the training-fold normalization so the coefficients apply
        # to the raw (unscaled) test data.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues: (n_test_samples, n_outputs, n_alphas) prediction errors.
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Average over samples and outputs, leaving one MSE per alpha.
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path

    Subclasses (LassoCV, ElasticNetCV and their multi-task variants) supply
    the ``path`` static method and the constructor defaults; ``fit`` below
    runs the cross-validated path search and refits the best model.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        Returns
        -------
        self : object
            The fitted estimator.
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # The presence of l1_ratio distinguishes the ElasticNet family from
        # the Lasso family among the concrete subclasses.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        # Pick the estimator class used for the final refit and validate
        # that the target shape matches the mono-/multi-task variant.
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        # Build (or tile) the alpha grid: one row of alphas per l1_ratio.
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        # Reshape to (n_l1_ratio, n_folds, n_alphas), then average over folds.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Select the (l1_ratio, alpha) pair with the lowest mean test MSE.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        # The refit is a single-alpha fit; Gram precompute is disabled here
        # (presumably because it does not pay off for one fit -- see the
        # deprecation of 'auto' above).
        model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        # Copy the refit results onto this CV estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    All of the heavy lifting (path computation over the alpha grid,
    fold-wise scoring, final refit) lives in :class:`LinearModelCV`;
    this class only binds ``lasso_path`` and the constructor defaults.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
    positive : bool, optional
        If positive, restrict regression coefficients to be positive
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting
    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # The CV machinery in LinearModelCV calls this to trace the path.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Nothing Lasso-specific to store: hand every knob to the base class.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute,
            max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose,
            n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),

    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alphas, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet

    """
    # the path function computed per (l1_ratio, fold) by LinearModelCV.fit
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Hyper-parameters are stored verbatim (sklearn convention:
        # no validation or mutation in __init__).
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    r"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (sklearn convention).
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.coef_ = None  # None triggers a cold start in fit()
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)

        # NOTE(review): MultiTaskLasso also sets self.l1_ratio (=1.0) in its
        # __init__, so this probe always selects 'ElasticNet' -- the error
        # below can name the wrong mono-task class; consider isinstance checks.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)

        n_samples, n_features = X.shape
        _, n_tasks = y.shape

        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))

        # Center/scale in place (copy already handled by check_array above).
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)

        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')

        # The Cython solver works on n_samples-scaled regularization terms.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')

        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)

        self._set_intercept(X_mean, y_mean, X_std)

        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    r"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        parameter vector (W in the cost function formula)

    intercept_ : array, shape (n_tasks,)
        independent term in decision function.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.coef_ = None  # None triggers a cold start in the inherited fit()
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # the multi-task Lasso is the l1_ratio == 1 special case of the
        # multi-task ElasticNet; ``fit`` is inherited unchanged
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds) or
        (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # the path function computed per (l1_ratio, fold) by LinearModelCV.fit
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (sklearn convention).
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # the path function computed per fold by LinearModelCV.fit
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # All arguments are forwarded verbatim to LinearModelCV.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/patches.py | 10 | 142681 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or
facecolor is not None):
import warnings
warnings.warn("Setting the 'color' property will override"
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
self.set_linewidth(linewidth)
self.set_linestyle(linestyle)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_fill(fill)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
# This is a general version of contains that should work on any
# patch with a path. However, patches that have a faster
# algebraic solution to hit-testing should override this
# method.
if six.callable(self._contains):
return self._contains(self, mouseevent)
if radius is None:
radius = self.get_linewidth()
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
if radius is None:
radius = self.get_linewidth()
return self.get_path().contains_point(point,
self.get_transform(),
radius)
    def update_from(self, other):
        """
        Updates this :class:`Patch` from the properties of *other*.
        """
        artist.Artist.update_from(self, other)
        # NOTE: call order matters below -- set_fill() and set_alpha()
        # re-resolve the face/edge colors from the values stored by
        # set_facecolor()/set_edgecolor(), so they must run afterwards.
        self.set_edgecolor(other.get_edgecolor())
        self.set_facecolor(other.get_facecolor())
        self.set_fill(other.get_fill())
        self.set_hatch(other.get_hatch())
        self.set_linewidth(other.get_linewidth())
        self.set_linestyle(other.get_linestyle())
        self.set_transform(other.get_data_transform())
        self.set_figure(other.get_figure())
        self.set_alpha(other.get_alpha())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.edgecolor']
self._original_edgecolor = color
self._edgecolor = colors.colorConverter.to_rgba(color, self._alpha)
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.facecolor']
self._original_facecolor = color # save: otherwise changing _fill
# may lose alpha information
self._facecolor = colors.colorConverter.to_rgba(color, self._alpha)
if not self._fill:
self._facecolor = list(self._facecolor)
self._facecolor[3] = 0
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha tranparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
self.set_facecolor(self._original_facecolor) # using self._fill and
# self._alpha
self.set_edgecolor(self._original_edgecolor)
def set_linewidth(self, w):
    """
    Set the patch linewidth in points

    ACCEPTS: float or None for default
    """
    self._linewidth = mpl.rcParams['patch.linewidth'] if w is None else w

def set_lw(self, lw):
    """alias for set_linewidth"""
    return self.set_linewidth(lw)
def set_linestyle(self, ls):
    """
    Set the patch linestyle

    ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
    """
    # None is accepted and means the default, solid, style
    self._linestyle = "solid" if ls is None else ls

def set_ls(self, ls):
    """alias for set_linestyle"""
    return self.set_linestyle(ls)
def set_fill(self, b):
    """
    Set whether to fill the patch

    ACCEPTS: [True | False]
    """
    self._fill = bool(b)
    # re-resolve the face color: set_facecolor zeroes the alpha channel
    # when the patch is unfilled
    self.set_facecolor(self._original_facecolor)

def get_fill(self):
    'return whether fill is set'
    return self._fill

# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
    """
    Set the patch capstyle

    ACCEPTS: ['butt' | 'round' | 'projecting']
    """
    # case-insensitive; validated against the class-level whitelist
    name = s.lower()
    if name not in self.validCap:
        raise ValueError('set_capstyle passed "%s";\n' % (name,)
                         + 'valid capstyles are %s' % (self.validCap,))
    self._capstyle = name

def get_capstyle(self):
    "Return the current capstyle"
    return self._capstyle
def set_joinstyle(self, s):
    """
    Set the patch joinstyle

    ACCEPTS: ['miter' | 'round' | 'bevel']
    """
    # case-insensitive; validated against the class-level whitelist
    name = s.lower()
    if name not in self.validJoin:
        raise ValueError('set_joinstyle passed "%s";\n' % (name,)
                         + 'valid joinstyles are %s' % (self.validJoin,))
    self._joinstyle = name

def get_joinstyle(self):
    "Return the current joinstyle"
    return self._joinstyle
def set_hatch(self, hatch):
    r"""
    Set the hatching pattern.

    *hatch* can be one of::

      /   - diagonal hatching
      \   - back diagonal
      |   - vertical
      -   - horizontal
      +   - crossed
      x   - crossed diagonal
      o   - small circle
      O   - large circle
      .   - dots
      *   - stars

    Letters can be combined, in which case all the specified
    hatchings are done.  If same letter repeats, it increases the
    density of hatching of that pattern.

    Hatching is supported in the PostScript, PDF, SVG and Agg
    backends only.

    ACCEPTS: ['/' | '\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
    """
    # stored verbatim; interpretation happens in the backend
    self._hatch = hatch

def get_hatch(self):
    'Return the current hatching pattern'
    return self._hatch
@allow_rasterization
def draw(self, renderer):
    'Draw the :class:`Patch` to the given *renderer*.'
    if not self.get_visible():
        return

    renderer.open_group('patch', self.get_gid())
    gc = renderer.new_gc()

    gc.set_foreground(self._edgecolor, isRGBA=True)

    lw = self._linewidth
    if self._edgecolor[3] == 0:
        # fully transparent edge: suppress the stroke entirely
        lw = 0
    gc.set_linewidth(lw)
    gc.set_linestyle(self._linestyle)
    gc.set_capstyle(self._capstyle)
    gc.set_joinstyle(self._joinstyle)

    gc.set_antialiased(self._antialiased)
    self._set_gc_clip(gc)
    gc.set_url(self._url)
    gc.set_snap(self.get_snap())

    rgbFace = self._facecolor
    if rgbFace[3] == 0:
        rgbFace = None  # (some?) renderers expect this as no-fill signal

    gc.set_alpha(self._alpha)

    if self._hatch:
        gc.set_hatch(self._hatch)

    if self.get_sketch_params() is not None:
        gc.set_sketch_params(*self.get_sketch_params())

    # split the transform: the non-affine part is baked into the path,
    # the affine remainder is handed to the backend
    path = self.get_path()
    transform = self.get_transform()
    tpath = transform.transform_path_non_affine(path)
    affine = transform.get_affine()

    if self.get_path_effects():
        # wrap the renderer so path effects are applied on draw_path
        from matplotlib.patheffects import PathEffectRenderer
        renderer = PathEffectRenderer(self.get_path_effects(), renderer)

    renderer.draw_path(gc, tpath, affine, rgbFace)
    gc.restore()
    renderer.close_group('patch')
def get_path(self):
    """
    Return the path of this patch.

    The base class has no geometry of its own; concrete subclasses
    must override this method.
    """
    raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
    # Bounds of the patch path under the artist's current transform;
    # *renderer* is accepted for API symmetry but not needed here.
    path = self.get_path()
    return path.get_extents(self.get_transform())
# Build the shared kwargs table from Patch's kwdoc and register it under
# every patch-class key, so subclass docstrings can interpolate %(Patch)s.
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
          'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
          'FancyBboxPatch', 'Patch'):
    docstring.interpd.update({k: patchdoc})

# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
    def __str__(self):
        return "Shadow(%s)" % (str(self.patch))

    @docstring.dedent_interpd
    def __init__(self, patch, ox, oy, props=None, **kwargs):
        """
        Create a shadow of the given *patch* offset by *ox*, *oy*.
        *props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
        but darkened.

        kwargs are
        %(Patch)s
        """
        Patch.__init__(self)
        self.patch = patch
        self.props = props
        # offset of the shadow from the patch, in points
        self._ox, self._oy = ox, oy
        self._shadow_transform = transforms.Affine2D()
        self._update()

    def _update(self):
        # start from the shadowed patch's own properties ...
        self.update_from(self.patch)
        if self.props is not None:
            # ... and let an explicit property dict override them
            self.update(self.props)
        else:
            # ... or default to a darkened, half-transparent face
            r, g, b, a = colors.colorConverter.to_rgba(
                self.patch.get_facecolor())
            rho = 0.3  # darkening factor
            r = rho * r
            g = rho * g
            b = rho * b
            self.set_facecolor((r, g, b, 0.5))
            self.set_edgecolor((r, g, b, 0.5))
            self.set_alpha(0.5)

    def _update_transform(self, renderer):
        # offsets are given in points; convert to pixels for this renderer
        ox = renderer.points_to_pixels(self._ox)
        oy = renderer.points_to_pixels(self._oy)
        self._shadow_transform.clear().translate(ox, oy)

    def _get_ox(self):
        return self._ox

    def _set_ox(self, ox):
        self._ox = ox

    def _get_oy(self):
        return self._oy

    def _set_oy(self, oy):
        self._oy = oy

    def get_path(self):
        # the shadow shares the shadowed patch's geometry ...
        return self.patch.get_path()

    def get_patch_transform(self):
        # ... shifted by the shadow offset
        return self.patch.get_patch_transform() + self._shadow_transform

    def draw(self, renderer):
        self._update_transform(renderer)
        Patch.draw(self, renderer)
class Rectangle(Patch):
    """
    Draw a rectangle with lower left at *xy* = (*x*, *y*) with
    specified *width* and *height*.
    """

    def __str__(self):
        return self.__class__.__name__ \
            + "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height, angle=0.0, **kwargs):
        """
        *angle*
          rotation in degrees (anti-clockwise)

        *fill* is a boolean indicating whether to fill the rectangle

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)

        self._x = xy[0]
        self._y = xy[1]
        self._width = width
        self._height = height
        self._angle = angle
        # Note: This cannot be calculated until this is added to an Axes
        self._rect_transform = transforms.IdentityTransform()

    def get_path(self):
        """
        Return the vertices of the rectangle (the unit rectangle;
        position, size and rotation live in the patch transform).
        """
        return Path.unit_rectangle()

    def _update_patch_transform(self):
        """NOTE: This cannot be called until after this has been added
        to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
        not directly access the transformation member variable.
        """
        x = self.convert_xunits(self._x)
        y = self.convert_yunits(self._y)
        width = self.convert_xunits(self._width)
        height = self.convert_yunits(self._height)
        bbox = transforms.Bbox.from_bounds(x, y, width, height)
        rot_trans = transforms.Affine2D()
        rot_trans.rotate_deg_around(x, y, self._angle)
        self._rect_transform = transforms.BboxTransformTo(bbox)
        self._rect_transform += rot_trans

    def get_patch_transform(self):
        self._update_patch_transform()
        return self._rect_transform

    def contains(self, mouseevent):
        # special case the degenerate rectangle
        if self._width == 0 or self._height == 0:
            return False, {}

        # map the event into unit-rectangle space and test membership
        x, y = self.get_transform().inverted().transform_point(
            (mouseevent.x, mouseevent.y))
        return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}

    def get_x(self):
        "Return the left coord of the rectangle"
        return self._x

    def get_y(self):
        "Return the bottom coord of the rectangle"
        return self._y

    def get_xy(self):
        "Return the left and bottom coords of the rectangle"
        return self._x, self._y

    def get_width(self):
        "Return the width of the rectangle"
        return self._width

    def get_height(self):
        "Return the height of the rectangle"
        return self._height

    def set_x(self, x):
        """
        Set the left coord of the rectangle

        ACCEPTS: float
        """
        self._x = x

    def set_y(self, y):
        """
        Set the bottom coord of the rectangle

        ACCEPTS: float
        """
        self._y = y

    def set_xy(self, xy):
        """
        Set the left and bottom coords of the rectangle

        ACCEPTS: 2-item sequence
        """
        self._x, self._y = xy

    def set_width(self, w):
        """
        Set the width of the rectangle

        ACCEPTS: float
        """
        self._width = w

    def set_height(self, h):
        """
        Set the height of the rectangle

        ACCEPTS: float
        """
        self._height = h

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle: l,b,w,h

        ACCEPTS: (left, bottom, width, height)
        """
        # BUGFIX: a single argument (an (l, b, w, h) sequence) is the
        # len(args) == 1 case; the previous len(args) == 0 test indexed
        # args[0] on an empty tuple and always raised IndexError.
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h

    def get_bbox(self):
        return transforms.Bbox.from_bounds(self._x, self._y,
                                           self._width, self._height)

    xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
    """
    A regular polygon patch.
    """
    def __str__(self):
        return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])

    @docstring.dedent_interpd
    def __init__(self, xy, numVertices, radius=5, orientation=0,
                 **kwargs):
        """
        Constructor arguments:

        *xy*
          A length 2 tuple (*x*, *y*) of the center.

        *numVertices*
          the number of vertices.

        *radius*
          The distance from the center to each of the vertices.

        *orientation*
          rotates the polygon (in radians).

        Valid kwargs are:

        %(Patch)s
        """
        self._xy = xy
        self._numVertices = numVertices
        self._orientation = orientation
        self._radius = radius
        # the geometry is a unit polygon; size/rotation/position live in
        # the transform below
        self._path = Path.unit_regular_polygon(numVertices)
        self._poly_transform = transforms.Affine2D()
        # NOTE(review): transform state is initialised before
        # Patch.__init__ runs -- presumably because kwargs may touch the
        # properties below; confirm before reordering.
        self._update_transform()

        Patch.__init__(self, **kwargs)

    def _update_transform(self):
        # unit polygon -> scaled, rotated, translated polygon
        self._poly_transform.clear() \
            .scale(self.radius) \
            .rotate(self.orientation) \
            .translate(*self.xy)

    def _get_xy(self):
        return self._xy

    def _set_xy(self, xy):
        self._xy = xy
        self._update_transform()
    # center of the polygon
    xy = property(_get_xy, _set_xy)

    def _get_orientation(self):
        return self._orientation

    def _set_orientation(self, orientation):
        self._orientation = orientation
        self._update_transform()
    # rotation of the polygon, in radians
    orientation = property(_get_orientation, _set_orientation)

    def _get_radius(self):
        return self._radius

    def _set_radius(self, radius):
        self._radius = radius
        self._update_transform()
    # center-to-vertex distance
    radius = property(_get_radius, _set_radius)

    def _get_numvertices(self):
        return self._numVertices

    def _set_numvertices(self, numVertices):
        self._numVertices = numVertices
    # number of polygon vertices
    numvertices = property(_get_numvertices, _set_numvertices)

    def get_path(self):
        return self._path

    def get_patch_transform(self):
        self._update_transform()
        return self._poly_transform
class PathPatch(Patch):
    """
    A general polycurve path patch.
    """
    def __str__(self):
        return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])

    @docstring.dedent_interpd
    def __init__(self, path, **kwargs):
        """
        *path* is a :class:`matplotlib.path.Path` object.

        Valid kwargs are:
        %(Patch)s

        .. seealso::

            :class:`Patch`
                For additional kwargs
        """
        Patch.__init__(self, **kwargs)
        # the path is stored as given; no copy is made
        self._path = path

    def get_path(self):
        return self._path
class Polygon(Patch):
    """
    A general polygon patch.
    """
    def __str__(self):
        return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])

    @docstring.dedent_interpd
    def __init__(self, xy, closed=True, **kwargs):
        """
        *xy* is a numpy array with shape Nx2.

        If *closed* is *True*, the polygon will be closed so the
        starting and ending points are the same.

        Valid kwargs are:
        %(Patch)s

        .. seealso::

            :class:`Patch`
                For additional kwargs
        """
        Patch.__init__(self, **kwargs)
        self._closed = closed
        self.set_xy(xy)

    def get_path(self):
        """
        Get the path of the polygon

        Returns
        -------
        path : Path
           The :class:`~matplotlib.path.Path` object for
           the polygon
        """
        return self._path

    def get_closed(self):
        """
        Returns if the polygon is closed

        Returns
        -------
        closed : bool
            If the path is closed
        """
        return self._closed

    def set_closed(self, closed):
        """
        Set if the polygon is closed

        Parameters
        ----------
        closed : bool
           True if the polygon is closed
        """
        wanted = bool(closed)
        if self._closed == wanted:
            # nothing to do; avoids rebuilding the path
            return
        self._closed = wanted
        self.set_xy(self.get_xy())

    def get_xy(self):
        """
        Get the vertices of the path

        Returns
        -------
        vertices : numpy array
            The coordinates of the vertices as a Nx2
            ndarray.
        """
        return self._path.vertices

    def set_xy(self, xy):
        """
        Set the vertices of the polygon

        Parameters
        ----------
        xy : numpy array or iterable of pairs
           The coordinates of the vertices as a Nx2
           ndarray or iterable of pairs.
        """
        xy = np.asarray(xy)
        if self._closed:
            # a closed ring: append the first vertex if it is not already
            # repeated at the end
            if len(xy) and (xy[0] != xy[-1]).any():
                xy = np.concatenate([xy, [xy[0]]])
        elif len(xy) > 2 and (xy[0] == xy[-1]).all():
            # an open polygon: drop a redundant closing vertex
            xy = xy[:-1]
        self._path = Path(xy, closed=self._closed)

    _get_xy = get_xy
    _set_xy = set_xy
    xy = property(
        get_xy, set_xy, None,
        """Set/get the vertices of the polygon.  This property is
           provided for backward compatibility with matplotlib 0.91.x
           only.  New code should use
           :meth:`~matplotlib.patches.Polygon.get_xy` and
           :meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
    """
    Wedge shaped patch.
    """
    def __str__(self):
        return "Wedge(%g,%g)" % (self.theta1, self.theta2)

    @docstring.dedent_interpd
    def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
        """
        Draw a wedge centered at *x*, *y* center with radius *r* that
        sweeps *theta1* to *theta2* (in degrees).  If *width* is given,
        then a partial wedge is drawn from inner radius *r* - *width*
        to outer radius *r*.

        Valid kwargs are:

        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self.center = center
        self.r, self.width = r, width
        self.theta1, self.theta2 = theta1, theta2
        self._patch_transform = transforms.IdentityTransform()
        self._recompute_path()

    def _recompute_path(self):
        # Build the wedge path at unit radius, then scale and shift it to
        # the final location at the end.
        # Inner and outer rings are connected unless the annulus is complete
        if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
            theta1, theta2 = 0, 360
            connector = Path.MOVETO
        else:
            theta1, theta2 = self.theta1, self.theta2
            connector = Path.LINETO

        # Form the outer ring
        arc = Path.arc(theta1, theta2)

        if self.width is not None:
            # Partial annulus needs to draw the outer ring
            # followed by a reversed and scaled inner ring
            v1 = arc.vertices
            v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
            v = np.vstack([v1, v2, v1[0, :], (0, 0)])
            c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
            # the first code of the inner ring becomes the connector
            c[len(arc.codes)] = connector
        else:
            # Wedge doesn't need an inner ring
            v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
            c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])

        # Shift and scale the wedge to the final location.
        v *= self.r
        v += np.asarray(self.center)
        self._path = Path(v, c)

    def set_center(self, center):
        # invalidate the cached path; get_path() rebuilds it lazily
        self._path = None
        self.center = center

    def set_radius(self, radius):
        self._path = None
        self.r = radius

    def set_theta1(self, theta1):
        self._path = None
        self.theta1 = theta1

    def set_theta2(self, theta2):
        self._path = None
        self.theta2 = theta2

    def set_width(self, width):
        self._path = None
        self.width = width

    def get_path(self):
        # rebuild the path if a setter invalidated it
        if self._path is None:
            self._recompute_path()
        return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
    """
    An arrow patch.
    """
    def __str__(self):
        return "Arrow()"

    # unit-length arrow pointing along +x with a 0.2-wide tail,
    # defined once at class level; instances only vary the transform
    _path = Path([
        [0.0, 0.1], [0.0, -0.1],
        [0.8, -0.1], [0.8, -0.3],
        [1.0, 0.0], [0.8, 0.3],
        [0.8, 0.1], [0.0, 0.1]],
        closed=True)

    @docstring.dedent_interpd
    def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
        """
        Draws an arrow, starting at (*x*, *y*), direction and length
        given by (*dx*, *dy*) the width of the arrow is scaled by *width*.

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        length = np.sqrt(dx ** 2 + dy ** 2) or 1  # account for div by zero
        cos_t = float(dx) / length
        sin_t = float(dy) / length
        # stretch the unit arrow, rotate it into direction, then shift to
        # the starting point
        stretch = transforms.Affine2D().scale(length, width)
        rotate = transforms.Affine2D.from_values(cos_t, sin_t,
                                                 -sin_t, cos_t, 0.0, 0.0)
        shift = transforms.Affine2D().translate(x, y)
        self._patch_transform = (stretch + rotate + shift).frozen()

    def get_path(self):
        return self._path

    def get_patch_transform(self):
        return self._patch_transform
class FancyArrow(Polygon):
    """
    Like Arrow, but lets you set head width and head height independently.
    """

    def __str__(self):
        return "FancyArrow()"

    @docstring.dedent_interpd
    def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
                 head_width=None, head_length=None, shape='full', overhang=0,
                 head_starts_at_zero=False, **kwargs):
        """
        Constructor arguments

          *width*: float (default: 0.001)
            width of full arrow tail

          *length_includes_head*: [True | False] (default: False)
            True if head is to be counted in calculating the length.

          *head_width*: float or None (default: 3*width)
            total width of the full arrow head

          *head_length*: float or None (default: 1.5 * head_width)
            length of arrow head

          *shape*: ['full', 'left', 'right'] (default: 'full')
            draw the left-half, right-half, or full arrow

          *overhang*: float (default: 0)
            fraction that the arrow is swept back (0 overhang means
            triangular shape). Can be negative or greater than one.

          *head_starts_at_zero*: [True | False] (default: False)
            if True, the head starts being drawn at coordinate 0
            instead of ending at coordinate 0.

        Other valid kwargs (inherited from :class:`Patch`) are:
        %(Patch)s
        """
        # BUGFIX: the docstring documents the head_width default as
        # 3*width, but the code used 20*width -- align with the
        # documented contract.
        if head_width is None:
            head_width = 3 * width
        if head_length is None:
            head_length = 1.5 * head_width

        distance = np.sqrt(dx ** 2 + dy ** 2)
        if length_includes_head:
            length = distance
        else:
            length = distance + head_length
        if not length:
            verts = []  # display nothing if empty
        else:
            # start by drawing horizontal arrow, point at (0,0)
            hw, hl, hs, lw = head_width, head_length, overhang, width
            left_half_arrow = np.array([
                [0.0, 0.0],                   # tip
                [-hl, -hw / 2.0],             # leftmost
                [-hl * (1 - hs), -lw / 2.0],  # meets stem
                [-length, -lw / 2.0],         # bottom left
                [-length, 0],
            ])
            # if we're not including the head, shift up by head length
            if not length_includes_head:
                left_half_arrow += [head_length, 0]
            # if the head starts at 0, shift up by another head length
            if head_starts_at_zero:
                left_half_arrow += [head_length / 2.0, 0]
            # figure out the shape, and complete accordingly
            if shape == 'left':
                coords = left_half_arrow
            else:
                # mirror across the stem axis
                right_half_arrow = left_half_arrow * [1, -1]
                if shape == 'right':
                    coords = right_half_arrow
                elif shape == 'full':
                    # The half-arrows contain the midpoint of the stem,
                    # which we can omit from the full arrow. Including it
                    # twice caused a problem with xpdf.
                    coords = np.concatenate([left_half_arrow[:-1],
                                             right_half_arrow[-2::-1]])
                else:
                    raise ValueError("Got unknown shape: %s" % shape)
            # NOTE(review): if distance == 0 while length != 0 (head not
            # counted in the length), these divisions raise
            # ZeroDivisionError -- confirm whether zero-length arrows
            # with heads should be supported.
            cx = float(dx) / distance
            sx = float(dy) / distance
            M = np.array([[cx, sx], [-sx, cx]])
            # rotate into direction and translate so the tip lands on
            # (x + dx, y + dy)
            verts = np.dot(coords, M) + (x + dx, y + dy)

        Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
# Register FancyArrow's constructor docs for %(FancyArrow)s interpolation.
# (The original source repeated this idempotent call twice; once suffices.)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
class YAArrow(Patch):
    """
    Yet another arrow class.

    This is an arrow that is defined in display space and has a tip at
    *x1*, *y1* and a base at *x2*, *y2*.
    """
    def __str__(self):
        return "YAArrow()"

    @docstring.dedent_interpd
    def __init__(self, figure, xytip, xybase,
                 width=4, frac=0.1, headwidth=12, **kwargs):
        """
        Constructor arguments:

        *xytip*
          (*x*, *y*) location of arrow tip

        *xybase*
          (*x*, *y*) location the arrow base mid point

        *figure*
          The :class:`~matplotlib.figure.Figure` instance
          (fig.dpi)

        *width*
          The width of the arrow in points

        *frac*
          The fraction of the arrow length occupied by the head

        *headwidth*
          The width of the base of the arrow head in points

        Valid kwargs are:
        %(Patch)s
        """
        self.xytip = xytip
        self.xybase = xybase
        self.width = width
        self.frac = frac
        self.headwidth = headwidth
        Patch.__init__(self, **kwargs)
        # Set self.figure after Patch.__init__, since it sets self.figure to
        # None
        self.figure = figure

    def get_path(self):
        # Since this is dpi dependent, we need to recompute the path
        # every time.

        # the base vertices
        x1, y1 = self.xytip
        x2, y2 = self.xybase
        # half-widths of the tail (k1) and of the head base (k2), in pixels
        k1 = self.width * self.figure.dpi / 72. / 2.
        k2 = self.headwidth * self.figure.dpi / 72. / 2.
        xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)

        # a point on the segment 20% of the distance from the tip to the base
        theta = math.atan2(y2 - y1, x2 - x1)
        r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
        xm = x1 + self.frac * r * math.cos(theta)
        ym = y1 + self.frac * r * math.sin(theta)
        # shoulder points: where the head meets the tail width (k1) and
        # the outer corners of the head (k2)
        xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
        xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)

        xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
        ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])

        return Path(list(zip(xs, ys)), closed=True)

    def get_patch_transform(self):
        return transforms.IdentityTransform()

    def getpoints(self, x1, y1, x2, y2, k):
        """
        For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
        return the points on the line that is perpendicular to the
        line and intersects (*x2*, *y2*) and the distance from (*x2*,
        *y2*) of the returned points is *k*.
        """
        x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))

        if y2 - y1 == 0:
            # horizontal segment: the perpendicular is vertical
            return x2, y2 + k, x2, y2 - k
        elif x2 - x1 == 0:
            # vertical segment: the perpendicular is horizontal
            return x2 + k, y2, x2 - k, y2

        m = (y2 - y1) / (x2 - x1)
        pm = -1. / m  # slope of the perpendicular line
        a = 1
        b = -2 * y2
        c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)

        # the two symmetric solutions of the quadratic give the two points
        y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
        x3a = (y3a - y2) / pm + x2

        y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
        x3b = (y3b - y2) / pm + x2
        return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
    """
    A polygon-approximation of a circle patch.
    """
    def __str__(self):
        # BUGFIX: RegularPolygon stores the center as _xy (exposed via the
        # ``xy`` property); there is no ``center`` attribute, so the old
        # ``% self.center`` raised AttributeError.
        return "CirclePolygon(%d,%d)" % tuple(self.xy)

    @docstring.dedent_interpd
    def __init__(self, xy, radius=5,
                 resolution=20,  # the number of vertices
                 **kwargs):
        """
        Create a circle at *xy* = (*x*, *y*) with given *radius*.

        This circle is approximated by a regular polygon with
        *resolution* sides.  For a smoother circle drawn with splines,
        see :class:`~matplotlib.patches.Circle`.

        Valid kwargs are:
        %(Patch)s
        """
        RegularPolygon.__init__(self, xy,
                                resolution,
                                radius,
                                orientation=0,
                                **kwargs)
class Ellipse(Patch):
    """
    A scale-free ellipse.
    """
    def __str__(self):
        return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
                                         self.width, self.height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height, angle=0.0, **kwargs):
        """
        *xy*
          center of ellipse

        *width*
          total length (diameter) of horizontal axis

        *height*
          total length (diameter) of vertical axis

        *angle*
          rotation in degrees (anti-clockwise)

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)

        self.center = xy
        self.width, self.height = width, height
        self.angle = angle
        # geometry is a unit circle; size/rotation/position live in the
        # patch transform
        self._path = Path.unit_circle()
        # Note: This cannot be calculated until this is added to an Axes
        self._patch_transform = transforms.IdentityTransform()

    def _recompute_transform(self):
        """NOTE: This cannot be called until after this has been added
        to an Axes, otherwise unit conversion will fail.  This makes it
        very important to call the accessor method and not directly
        access the transformation member variable.
        """
        center = (self.convert_xunits(self.center[0]),
                  self.convert_yunits(self.center[1]))
        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        # half the diameters are the scale factors of the unit circle
        self._patch_transform = transforms.Affine2D() \
            .scale(width * 0.5, height * 0.5) \
            .rotate_deg(self.angle) \
            .translate(*center)

    def get_path(self):
        """
        Return the path of the ellipse (the unit circle; scaling and
        rotation are applied through the patch transform).
        """
        return self._path

    def get_patch_transform(self):
        self._recompute_transform()
        return self._patch_transform

    def contains(self, ev):
        # map the event into unit-circle space and test the radius
        if ev.x is None or ev.y is None:
            return False, {}
        x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
        return (x * x + y * y) <= 1.0, {}
class Circle(Ellipse):
    """
    A circle patch.
    """
    def __str__(self):
        return "Circle((%g,%g),r=%g)" % (self.center[0],
                                         self.center[1],
                                         self.radius)

    @docstring.dedent_interpd
    def __init__(self, xy, radius=5, **kwargs):
        """
        Create true circle at center *xy* = (*x*, *y*) with given
        *radius*.  Unlike :class:`~matplotlib.patches.CirclePolygon`
        which is a polygonal approximation, this uses Bezier splines
        and is much closer to a scale-free circle.

        Valid kwargs are:
        %(Patch)s
        """
        self.radius = radius
        # a circle is an ellipse whose two diameters are equal
        Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)

    def set_radius(self, radius):
        """
        Set the radius of the circle

        ACCEPTS: float
        """
        # width/height store diameters; the radius property reads them back
        self.width = self.height = 2 * radius

    def get_radius(self):
        'return the radius of the circle'
        return self.width / 2.

    radius = property(get_radius, set_radius)
class Arc(Ellipse):
    """
    An elliptical arc.  Because it performs various optimizations, it
    can not be filled.

    The arc must be used in an :class:`~matplotlib.axes.Axes`
    instance---it can not be added directly to a
    :class:`~matplotlib.figure.Figure`---because it is optimized to
    only render the segments that are inside the axes bounding box
    with high resolution.
    """
    def __str__(self):
        return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
                                     self.width, self.height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height, angle=0.0,
                 theta1=0.0, theta2=360.0, **kwargs):
        """
        The following args are supported:

        *xy*
          center of ellipse

        *width*
          length of horizontal axis

        *height*
          length of vertical axis

        *angle*
          rotation in degrees (anti-clockwise)

        *theta1*
          starting angle of the arc in degrees

        *theta2*
          ending angle of the arc in degrees

        If *theta1* and *theta2* are not provided, the arc will form a
        complete ellipse.

        Valid kwargs are:

        %(Patch)s

        Raises ValueError if *fill* is passed as True: arcs are
        outline-only.
        """
        fill = kwargs.setdefault('fill', False)
        if fill:
            raise ValueError("Arc objects can not be filled")

        Ellipse.__init__(self, xy, width, height, angle, **kwargs)

        self.theta1 = theta1
        self.theta2 = theta2

        self._path = Path.arc(self.theta1, self.theta2)

    @allow_rasterization
    def draw(self, renderer):
        """
        Ellipses are normally drawn using an approximation that uses
        eight cubic bezier splines.  The error of this approximation
        is 1.89818e-6, according to this unverified source:

          Lancaster, Don.  Approximating a Circle or an Ellipse Using
          Four Bezier Cubic Splines.

          http://www.tinaja.com/glib/ellipse4.pdf

        There is a use case where very large ellipses must be drawn
        with very high accuracy, and it is too expensive to render the
        entire ellipse with enough segments (either splines or line
        segments).  Therefore, in the case where either radius of the
        ellipse is large enough that the error of the spline
        approximation will be visible (greater than one pixel offset
        from the ideal), a different technique is used.

        In that case, only the visible parts of the ellipse are drawn,
        with each visible arc using a fixed number of spline segments
        (8).  The algorithm proceeds as follows:

          1. The points where the ellipse intersects the axes bounding
             box are located.  (This is done be performing an inverse
             transformation on the axes bbox such that it is relative
             to the unit circle -- this makes the intersection
             calculation much easier than doing rotated ellipse
             intersection directly).

             This uses the "line intersecting a circle" algorithm
             from:

               Vince, John.  Geometry for Computer Graphics: Formulae,
               Examples & Proofs.  London: Springer-Verlag, 2005.

          2. The angles of each of the intersection points are
             calculated.

          3. Proceeding counterclockwise starting in the positive
             x-direction, each of the visible arc-segments between the
             pairs of vertices are drawn using the bezier arc
             approximation technique implemented in
             :meth:`matplotlib.path.Path.arc`.
        """
        if not hasattr(self, 'axes'):
            raise RuntimeError('Arcs can only be used in Axes instances')

        self._recompute_transform()

        # Get the width and height in pixels
        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        width, height = self.get_transform().transform_point(
            (width, height))
        inv_error = (1.0 / 1.89818e-6) * 0.5

        if width < inv_error and height < inv_error:
            # small enough that the spline error is sub-pixel: draw the
            # precomputed arc path directly
            #self._path = Path.arc(self.theta1, self.theta2)
            return Patch.draw(self, renderer)

        def iter_circle_intersect_on_line(x0, y0, x1, y1):
            # Yield the intersection point(s) of the unit circle with the
            # infinite line through (x0, y0) and (x1, y1).
            dx = x1 - x0
            dy = y1 - y0
            dr2 = dx * dx + dy * dy
            D = x0 * y1 - x1 * y0
            D2 = D * D
            discrim = dr2 - D2

            # Single (tangential) intersection
            if discrim == 0.0:
                x = (D * dy) / dr2
                y = (-D * dx) / dr2
                yield x, y
            elif discrim > 0.0:
                # The definition of "sign" here is different from
                # np.sign: we never want to get 0.0
                if dy < 0.0:
                    sign_dy = -1.0
                else:
                    sign_dy = 1.0
                sqrt_discrim = np.sqrt(discrim)
                for sign in (1., -1.):
                    x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
                    y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
                    yield x, y

        def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
            # Restrict the line intersections to the given segment, with a
            # small epsilon tolerance at the endpoints.
            epsilon = 1e-9
            if x1 < x0:
                x0e, x1e = x1, x0
            else:
                x0e, x1e = x0, x1
            if y1 < y0:
                y0e, y1e = y1, y0
            else:
                y0e, y1e = y0, y1
            x0e -= epsilon
            y0e -= epsilon
            x1e += epsilon
            y1e += epsilon
            for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
                if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
                    yield x, y

        # Transforms the axes box_path so that it is relative to the unit
        # circle in the same way that it is relative to the desired
        # ellipse.
        box_path = Path.unit_rectangle()
        box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
            self.get_transform().inverted()
        box_path = box_path.transformed(box_path_transform)

        PI = np.pi
        TWOPI = PI * 2.0
        RAD2DEG = 180.0 / PI
        DEG2RAD = PI / 180.0
        theta1 = self.theta1
        theta2 = self.theta2
        thetas = {}
        # For each of the point pairs, there is a line segment
        for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
            x0, y0 = p0
            x1, y1 = p1
            for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
                theta = np.arccos(x)
                if y < 0:
                    theta = TWOPI - theta
                # Convert radians to angles
                theta *= RAD2DEG
                if theta > theta1 and theta < theta2:
                    thetas[theta] = None

        thetas = list(six.iterkeys(thetas))
        thetas.sort()
        thetas.append(theta2)

        last_theta = theta1
        theta1_rad = theta1 * DEG2RAD
        inside = box_path.contains_point((np.cos(theta1_rad),
                                          np.sin(theta1_rad)))

        # save original path
        path_original = self._path
        for theta in thetas:
            if inside:
                # BUGFIX: the arc segment must be assigned to self._path
                # before drawing; previously the Path.arc result was
                # discarded, so Patch.draw re-drew the full original arc
                # for every visible segment.
                self._path = Path.arc(last_theta, theta, 8)
                Patch.draw(self, renderer)
                inside = False
            else:
                inside = True
            last_theta = theta
        # restore original path
        self._path = path_original
def bbox_artist(artist, renderer, props=None, fill=True):
    """
    This is a debug function to draw a rectangle around the bounding
    box returned by
    :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
    to test whether the artist is returning the correct bbox.

    *props* is a dict of rectangle props with the additional property
    'pad' that sets the padding around the bbox in points.
    """
    # copy so popping 'pad' does not alter the caller's dict
    props = dict(props or {})
    pad = renderer.points_to_pixels(props.pop('pad', 4))
    left, bottom, width, height = artist.get_window_extent(renderer).bounds
    # grow the box by half the padding on every side
    left -= pad / 2.
    bottom -= pad / 2.
    width += pad
    height += pad
    r = Rectangle(xy=(left, bottom),
                  width=width,
                  height=height,
                  fill=fill,
                  )
    # draw in raw display coordinates, unclipped
    r.set_transform(transforms.IdentityTransform())
    r.set_clip_on(False)
    r.update(props)
    r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
    """
    This is a debug function to draw a rectangle around the bounding
    box returned by
    :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
    to test whether the artist is returning the correct bbox.
    """
    left, bottom, width, height = bbox.bounds
    r = Rectangle(xy=(left, bottom),
                  width=width,
                  height=height,
                  edgecolor=color,
                  fill=False,
                  )
    if trans is not None:
        r.set_transform(trans)
    r.set_clip_on(False)
    r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
    """
    A helper function for the _Style class.  Given the dictionary of
    (stylename : styleclass), return a formatted string listing all the
    styles. Used to update the documentation.
    """
    import inspect

    _table = [["Class", "Name", "Attrs"]]

    for name, cls in sorted(_styles.items()):
        # inspect.getargspec was removed in Python 3.11; use the
        # long-available getfullargspec replacement when present.
        if hasattr(inspect, 'getfullargspec'):
            spec = inspect.getfullargspec(cls.__init__)
        else:
            spec = inspect.getargspec(cls.__init__)
        args, defaults = spec[0], spec[3]
        if defaults:
            # pair trailing positional args with their default values
            args = [(argname, argdefault)
                    for argname, argdefault in zip(args[1:], defaults)]
        else:
            args = None

        if args is None:
            argstr = 'None'
        else:
            argstr = ",".join([("%s=%s" % (an, av))
                               for an, av in args])

        # adding ``quotes`` since - and | have special meaning in reST
        _table.append([cls.__name__, "``%s``" % name, argstr])

    return _pprint_table(_table)
class _Style(object):
    """
    A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclass of it, and it
    provides some helper functions.
    """

    def __new__(self, stylename, **kw):
        """
        return the instance of the subclass with the given style name.
        """
        # The concrete container class is expected to define a
        # ``_style_list`` attribute: a dict mapping style names to
        # style classes.
        tokens = stylename.replace(" ", "").split(",")
        key = tokens[0].lower()

        try:
            style_cls = self._style_list[key]
        except KeyError:
            raise ValueError("Unknown style : %s" % stylename)

        # Remaining tokens are "name=value" attribute overrides; every
        # value is coerced to float.  A malformed token (no "=", or a
        # non-numeric value) surfaces as ValueError here.
        try:
            attrs = dict((k, float(v))
                         for k, v in (tok.split("=") for tok in tokens[1:]))
        except ValueError:
            raise ValueError("Incorrect style argument : %s" % stylename)

        # Keyword arguments win over string-embedded attributes.
        attrs.update(kw)
        return style_cls(**attrs)

    @classmethod
    def get_styles(klass):
        """
        A class method which returns a dictionary of available styles.
        """
        return klass._style_list

    @classmethod
    def pprint_styles(klass):
        """
        A class method which returns a string of the available styles.
        """
        return _pprint_styles(klass._style_list)

    @classmethod
    def register(klass, name, style):
        """
        Register a new style.
        """
        if issubclass(style, klass._Base):
            klass._style_list[name] = style
        else:
            raise ValueError("%s must be a subclass of %s" % (style,
                                                              klass._Base))
class BoxStyle(_Style):
    """
    :class:`BoxStyle` is a container class which defines several
    boxstyle classes, which are used for :class:`FancyBoxPatch`.

    A style object can be created as::

        BoxStyle.Round(pad=0.2)

    or::

        BoxStyle("Round", pad=0.2)

    or::

        BoxStyle("Round, pad=0.2")

    Following boxstyle classes are defined.

    %(AvailableBoxstyles)s

    An instance of any boxstyle class is an callable object,
    whose call signature is::

        __call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)

    and returns a :class:`Path` instance. *x0*, *y0*, *width* and
    *height* specify the location and size of the box to be
    drawn. *mutation_scale* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *mutation_aspect* determines the aspect-ratio of
    the mutation.

    .. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
    """

    # Registry mapping style name -> style class; populated below as each
    # concrete style is defined, and extensible via _Style.register().
    _style_list = {}

    class _Base(object):
        """
        :class:`BBoxTransmuterBase` and its derivatives are used to make a
        fancy box around a given rectangle. The :meth:`__call__` method
        returns the :class:`~matplotlib.path.Path` of the fancy box. This
        class is not an artist and actual drawing of the fancy box is done
        by the :class:`FancyBboxPatch` class.
        """

        # The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all its argument (except self) must have
        # the default values.

        def __init__(self):
            """
            initialization.
            """
            super(BoxStyle._Base, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):
            """
            The transmute method is a very core of the
            :class:`BboxTransmuter` class and must be overriden in the
            subclasses. It receives the location and size of the
            rectangle, and the mutation_size, with which the amount of
            padding and etc. will be scaled. It returns a
            :class:`~matplotlib.path.Path` instance.
            """
            raise NotImplementedError('Derived must override')

        def __call__(self, x0, y0, width, height, mutation_size,
                     aspect_ratio=1.):
            """
            Given the location and size of the box, return the path of
            the box around it.

              - *x0*, *y0*, *width*, *height* : location and size of the box
              - *mutation_size* : a reference scale for the mutation.
              - *aspect_ratio* : aspect-ration for the mutation.
            """
            # The __call__ method is a thin wrapper around the transmute method
            # and take care of the aspect.
            if aspect_ratio is not None:
                # Squeeze the given height by the aspect_ratio
                y0, height = y0 / aspect_ratio, height / aspect_ratio
                # call transmute method with squeezed height.
                path = self.transmute(x0, y0, width, height, mutation_size)
                vertices, codes = path.vertices, path.codes
                # Restore the height
                vertices[:, 1] = vertices[:, 1] * aspect_ratio
                return Path(vertices, codes)
            else:
                return self.transmute(x0, y0, width, height, mutation_size)

        def __reduce__(self):
            # because we have decided to nest these classes, we need to
            # add some more information to allow instance pickling.
            import matplotlib.cbook as cbook
            return (cbook._NestedClassGetter(),
                    (BoxStyle, self.__class__.__name__),
                    self.__dict__
                    )

    class Square(_Base):
        """
        A simple square box.
        """

        def __init__(self, pad=0.3):
            """
            *pad*
               amount of padding
            """
            self.pad = pad
            super(BoxStyle.Square, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):
            # pad is scaled by the overall mutation size.
            pad = mutation_size * self.pad

            # width and height with padding added.
            width, height = width + 2*pad, height + 2*pad

            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad,
            x1, y1 = x0 + width, y0 + height

            vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
            codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
            return Path(vertices, codes)

    _style_list["square"] = Square

    class Circle(_Base):
        """A simple circle box."""

        def __init__(self, pad=0.3):
            """
            Parameters
            ----------
            pad : float
                The amount of padding around the original box.
            """
            self.pad = pad
            super(BoxStyle.Circle, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):
            pad = mutation_size * self.pad
            width, height = width + 2 * pad, height + 2 * pad

            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad,
            # Circle centered on the padded box; radius covers the larger
            # of the two dimensions so the box is fully enclosed.
            return Path.circle((x0 + width/2., y0 + height/2.),
                               (max([width, height]) / 2.))

    _style_list["circle"] = Circle

    class LArrow(_Base):
        """
        (left) Arrow Box
        """

        def __init__(self, pad=0.3):
            self.pad = pad
            super(BoxStyle.LArrow, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad

            # width and height with padding added.
            width, height = width + 2. * pad, \
                            height + 2. * pad,

            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad,
            x1, y1 = x0 + width, y0 + height

            dx = (y1 - y0) / 2.
            dxx = dx * .5
            # adjust x0. 1.4 <- sqrt(2)
            x0 = x0 + pad / 1.4

            # Box outline with a triangular arrow head on the left edge.
            cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
                  (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
                  (x0 + dxx, y0 - dxx),  # arrow
                  (x0 + dxx, y0), (x0 + dxx, y0)]

            com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list["larrow"] = LArrow

    class RArrow(LArrow):
        """
        (right) Arrow Box
        """

        def __init__(self, pad=0.3):
            super(BoxStyle.RArrow, self).__init__(pad)

        def transmute(self, x0, y0, width, height, mutation_size):
            # Reuse the left-arrow path and mirror it horizontally about
            # the center of the (unpadded) box.
            p = BoxStyle.LArrow.transmute(self, x0, y0,
                                          width, height, mutation_size)

            p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]

            return p

    _style_list["rarrow"] = RArrow

    class Round(_Base):
        """
        A box with round corners.
        """

        def __init__(self, pad=0.3, rounding_size=None):
            """
            *pad*
              amount of padding

            *rounding_size*
              rounding radius of corners. *pad* if None
            """
            self.pad = pad
            self.rounding_size = rounding_size
            super(BoxStyle.Round, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):

            # padding
            pad = mutation_size * self.pad

            # size of the roudning corner
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad

            width, height = width + 2. * pad, \
                            height + 2. * pad,

            x0, y0 = x0 - pad, y0 - pad,
            x1, y1 = x0 + width, y0 + height

            # Round corners are implemented as quadratic bezier. e.g.,
            # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
            cp = [(x0 + dr, y0),
                  (x1 - dr, y0),
                  (x1, y0), (x1, y0 + dr),
                  (x1, y1 - dr),
                  (x1, y1), (x1 - dr, y1),
                  (x0 + dr, y1),
                  (x0, y1), (x0, y1 - dr),
                  (x0, y0 + dr),
                  (x0, y0), (x0 + dr, y0),
                  (x0 + dr, y0)]

            com = [Path.MOVETO,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list["round"] = Round

    class Round4(_Base):
        """
        Another box with round edges.
        """

        def __init__(self, pad=0.3, rounding_size=None):
            """
            *pad*
              amount of padding

            *rounding_size*
              rounding size of edges. *pad* if None
            """
            self.pad = pad
            self.rounding_size = rounding_size
            super(BoxStyle.Round4, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):

            # padding
            pad = mutation_size * self.pad

            # roudning size. Use a half of the pad if not set.
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad / 2.

            width, height = width + 2. * pad - 2 * dr, \
                            height + 2. * pad - 2 * dr,

            x0, y0 = x0 - pad + dr, y0 - pad + dr,
            x1, y1 = x0 + width, y0 + height

            # Each side is a cubic bezier whose control points bow outward
            # by dr, giving a continuously curved edge.
            cp = [(x0, y0),
                  (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
                  (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
                  (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
                  (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
                  (x0, y0)]

            com = [Path.MOVETO,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list["round4"] = Round4

    class Sawtooth(_Base):
        """
        A sawtooth box.
        """

        def __init__(self, pad=0.3, tooth_size=None):
            """
            *pad*
              amount of padding

            *tooth_size*
              size of the sawtooth. pad* if None
            """
            self.pad = pad
            self.tooth_size = tooth_size
            super(BoxStyle.Sawtooth, self).__init__()

        def _get_sawtooth_vertices(self, x0, y0, width, height,
                                   mutation_size):
            """
            Compute the list of vertices tracing a sawtooth outline around
            the padded box (bottom, right, top, left, then back to start).
            """

            # padding
            pad = mutation_size * self.pad

            # size of sawtooth
            if self.tooth_size is None:
                tooth_size = self.pad * .5 * mutation_size
            else:
                tooth_size = self.tooth_size * mutation_size

            tooth_size2 = tooth_size / 2.
            width, height = width + 2. * pad - tooth_size, \
                            height + 2. * pad - tooth_size,

            # the sizes of the vertical and horizontal sawtooth are
            # separately adjusted to fit the given box size.
            dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
            dsx = (width - tooth_size) / dsx_n
            dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
            dsy = (height - tooth_size) / dsy_n

            x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
            x1, y1 = x0 + width, y0 + height

            bottom_saw_x = [x0] + \
                           [x0 + tooth_size2 + dsx * .5 * i
                            for i
                            in range(dsx_n * 2)] + \
                           [x1 - tooth_size2]

            bottom_saw_y = [y0] + \
                           [y0 - tooth_size2, y0,
                            y0 + tooth_size2, y0] * dsx_n + \
                           [y0 - tooth_size2]

            right_saw_x = [x1] + \
                          [x1 + tooth_size2,
                           x1,
                           x1 - tooth_size2,
                           x1] * dsx_n + \
                          [x1 + tooth_size2]

            right_saw_y = [y0] + \
                          [y0 + tooth_size2 + dsy * .5 * i
                           for i
                           in range(dsy_n * 2)] + \
                          [y1 - tooth_size2]

            top_saw_x = [x1] + \
                        [x1 - tooth_size2 - dsx * .5 * i
                         for i
                         in range(dsx_n * 2)] + \
                        [x0 + tooth_size2]

            top_saw_y = [y1] + \
                        [y1 + tooth_size2,
                         y1,
                         y1 - tooth_size2,
                         y1] * dsx_n + \
                        [y1 + tooth_size2]

            left_saw_x = [x0] + \
                         [x0 - tooth_size2,
                          x0,
                          x0 + tooth_size2,
                          x0] * dsy_n + \
                         [x0 - tooth_size2]

            left_saw_y = [y1] + \
                         [y1 - tooth_size2 - dsy * .5 * i
                          for i
                          in range(dsy_n * 2)] + \
                         [y0 + tooth_size2]

            saw_vertices = list(zip(bottom_saw_x, bottom_saw_y)) + \
                           list(zip(right_saw_x, right_saw_y)) + \
                           list(zip(top_saw_x, top_saw_y)) + \
                           list(zip(left_saw_x, left_saw_y)) + \
                           [(bottom_saw_x[0], bottom_saw_y[0])]

            return saw_vertices

        def transmute(self, x0, y0, width, height, mutation_size):

            saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
                                                       height, mutation_size)
            path = Path(saw_vertices, closed=True)
            return path

    _style_list["sawtooth"] = Sawtooth

    class Roundtooth(Sawtooth):
        """A rounded tooth box."""

        def __init__(self, pad=0.3, tooth_size=None):
            """
            *pad*
              amount of padding

            *tooth_size*
              size of the sawtooth. pad* if None
            """
            super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)

        def transmute(self, x0, y0, width, height, mutation_size):
            # Same vertex skeleton as Sawtooth, but rendered as a chain of
            # quadratic beziers so the teeth become rounded bumps.
            saw_vertices = self._get_sawtooth_vertices(x0, y0,
                                                       width, height,
                                                       mutation_size)
            # Add a trailing vertex to allow us to close the polygon correctly
            saw_vertices = np.concatenate([np.array(saw_vertices),
                                           [saw_vertices[0]]], axis=0)
            codes = ([Path.MOVETO] +
                     [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1) // 2) +
                     [Path.CLOSEPOLY])
            return Path(saw_vertices, codes)

    _style_list["roundtooth"] = Roundtooth

    if __doc__:  # __doc__ could be None if -OO optimization is enabled
        __doc__ = cbook.dedent(__doc__) % \
            {"AvailableBoxstyles": _pprint_styles(_style_list)}
# Expose the formatted list of available box styles to other docstrings
# through the %(AvailableBoxstyles)s interpolation key.
docstring.interpd.update(
    AvailableBoxstyles=_pprint_styles(BoxStyle._style_list))
class FancyBboxPatch(Patch):
    """
    Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
    *y*) with specified width and height.

    :class:`FancyBboxPatch` class is similar to :class:`Rectangle`
    class, but it draws a fancy box around the rectangle. The
    transformation of the rectangle box to the fancy box is delegated
    to the :class:`BoxTransmuterBase` and its derived classes.
    """

    def __str__(self):
        return self.__class__.__name__ \
            + "(%g,%g;%gx%g)" % (self._x, self._y,
                                 self._width, self._height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height,
                 boxstyle="round",
                 bbox_transmuter=None,
                 mutation_scale=1.,
                 mutation_aspect=None,
                 **kwargs):
        """
        *xy* = lower left corner

        *width*, *height*

        *boxstyle* determines what kind of fancy box will be drawn. It
        can be a string of the style name with a comma separated
        attribute, or an instance of :class:`BoxStyle`. Following box
        styles are available.

        %(AvailableBoxstyles)s

        *mutation_scale* : a value with which attributes of boxstyle
        (e.g., pad) will be scaled. default=1.

        *mutation_aspect* : The height of the rectangle will be
        squeezed by this value before the mutation and the mutated
        box will be stretched by the inverse of it. default=None.

        Valid kwargs are:
        %(Patch)s
        """

        Patch.__init__(self, **kwargs)

        self._x = xy[0]
        self._y = xy[1]
        self._width = width
        self._height = height

        if boxstyle == "custom":
            # "custom" requires the caller to supply the transmuter object.
            if bbox_transmuter is None:
                raise ValueError("bbox_transmuter argument is needed with "
                                 "custom boxstyle")
            self._bbox_transmuter = bbox_transmuter
        else:
            self.set_boxstyle(boxstyle)

        self._mutation_scale = mutation_scale
        self._mutation_aspect = mutation_aspect

    @docstring.dedent_interpd
    def set_boxstyle(self, boxstyle=None, **kw):
        """
        Set the box style.

        *boxstyle* can be a string with boxstyle name with optional
        comma-separated attributes. Alternatively, the attrs can
        be provided as keywords::

            set_boxstyle("round,pad=0.2")
            set_boxstyle("round", pad=0.2)

        Old attrs simply are forgotten.

        Without argument (or with *boxstyle* = None), it returns
        available box styles.

        ACCEPTS: %(AvailableBoxstyles)s
        """
        if boxstyle is None:
            return BoxStyle.pprint_styles()

        # Accept a ready-made BoxStyle instance, any callable with the
        # transmuter signature, or a style-name string to be parsed.
        if isinstance(boxstyle, BoxStyle._Base):
            self._bbox_transmuter = boxstyle
        elif six.callable(boxstyle):
            self._bbox_transmuter = boxstyle
        else:
            self._bbox_transmuter = BoxStyle(boxstyle, **kw)

    def set_mutation_scale(self, scale):
        """
        Set the mutation scale.

        ACCEPTS: float
        """
        self._mutation_scale = scale

    def get_mutation_scale(self):
        """
        Return the mutation scale.
        """
        return self._mutation_scale

    def set_mutation_aspect(self, aspect):
        """
        Set the aspect ratio of the bbox mutation.

        ACCEPTS: float
        """
        self._mutation_aspect = aspect

    def get_mutation_aspect(self):
        """
        Return the aspect ratio of the bbox mutation.
        """
        return self._mutation_aspect

    def get_boxstyle(self):
        "Return the boxstyle object"
        return self._bbox_transmuter

    def get_path(self):
        """
        Return the mutated path of the rectangle
        """
        # Delegate the rectangle -> fancy-box transformation to the
        # boxstyle (transmuter) object.
        _path = self.get_boxstyle()(self._x, self._y,
                                    self._width, self._height,
                                    self.get_mutation_scale(),
                                    self.get_mutation_aspect())
        return _path

    # Following methods are borrowed from the Rectangle class.

    def get_x(self):
        "Return the left coord of the rectangle"
        return self._x

    def get_y(self):
        "Return the bottom coord of the rectangle"
        return self._y

    def get_width(self):
        "Return the width of the rectangle"
        return self._width

    def get_height(self):
        "Return the height of the rectangle"
        return self._height

    def set_x(self, x):
        """
        Set the left coord of the rectangle

        ACCEPTS: float
        """
        self._x = x

    def set_y(self, y):
        """
        Set the bottom coord of the rectangle

        ACCEPTS: float
        """
        self._y = y

    def set_width(self, w):
        """
        Set the width of the rectangle

        ACCEPTS: float
        """
        self._width = w

    def set_height(self, h):
        """
        Set the height of the rectangle

        ACCEPTS: float
        """
        self._height = h

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle: l,b,w,h

        ACCEPTS: (left, bottom, width, height)
        """
        # Accept either a single (l, b, w, h) sequence or four scalars.
        # BUGFIX: the test used to be ``len(args) == 0``, which could
        # never match the single-sequence call form, so
        # ``set_bounds((l, b, w, h))`` raised; ``== 1`` is the intended
        # dispatch (mirroring Rectangle.set_bounds).
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h

    def get_bbox(self):
        return transforms.Bbox.from_bounds(self._x, self._y,
                                           self._width, self._height)
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
class ConnectionStyle(_Style):
    """
    :class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which is used to create a path
    between two points. These are mainly used with
    :class:`FancyArrowPatch`.

    A connectionstyle object can be either created as::

        ConnectionStyle.Arc3(rad=0.2)

    or::

        ConnectionStyle("Arc3", rad=0.2)

    or::

        ConnectionStyle("Arc3, rad=0.2")

    The following classes are defined

    %(AvailableConnectorstyles)s

    An instance of any connection style class is an callable object,
    whose call signature is::

        __call__(self, posA, posB,
                 patchA=None, patchB=None,
                 shrinkA=2., shrinkB=2.)

    and it returns a :class:`Path` instance. *posA* and *posB* are
    tuples of x,y coordinates of the two points to be
    connected. *patchA* (or *patchB*) is given, the returned path is
    clipped so that it start (or end) from the boundary of the
    patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
    which is given in points.
    """

    # Registry mapping style name -> style class; populated below.
    _style_list = {}

    class _Base(object):
        """
        A base class for connectionstyle classes. The derived needs
        to implement a *connect* methods whose call signature is::

            connect(posA, posB)

        where posA and posB are tuples of x, y coordinates to be
        connected. The methods needs to return a path connecting two
        points. This base class defines a __call__ method, and few
        helper methods.
        """

        class SimpleEvent:
            # Minimal stand-in for a mouse event: the patch ``contains``
            # API expects an object with .x / .y attributes.
            def __init__(self, xy):
                self.x, self.y = xy

        def _clip(self, path, patchA, patchB):
            """
            Clip the path to the boundary of the patchA and patchB.
            The starting point of the path needed to be inside of the
            patchA and the end point inside the patch B. The *contains*
            methods of each patch object is utilized to test if the point
            is inside the path.
            """

            if patchA:
                def insideA(xy_display):
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchA.contains(xy_event)[0]

                try:
                    left, right = split_path_inout(path, insideA)
                except ValueError:
                    # Path never leaves patchA; keep it unchanged.
                    right = path

                path = right

            if patchB:
                def insideB(xy_display):
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchB.contains(xy_event)[0]

                try:
                    left, right = split_path_inout(path, insideB)
                except ValueError:
                    # Path never enters patchB; keep it unchanged.
                    left = path

                path = left

            return path

        def _shrink(self, path, shrinkA, shrinkB):
            """
            Shrink the path by fixed size (in points) with shrinkA and shrinkB
            """
            if shrinkA:
                # Cut off the part of the path within shrinkA of the start.
                x, y = path.vertices[0]
                insideA = inside_circle(x, y, shrinkA)

                try:
                    left, right = split_path_inout(path, insideA)
                    path = right
                except ValueError:
                    pass

            if shrinkB:
                # Cut off the part of the path within shrinkB of the end.
                x, y = path.vertices[-1]
                insideB = inside_circle(x, y, shrinkB)

                try:
                    left, right = split_path_inout(path, insideB)
                    path = left
                except ValueError:
                    pass

            return path

        def __call__(self, posA, posB,
                     shrinkA=2., shrinkB=2., patchA=None, patchB=None):
            """
            Calls the *connect* method to create a path between *posA*
            and *posB*. The path is clipped and shrinked.
            """
            path = self.connect(posA, posB)

            clipped_path = self._clip(path, patchA, patchB)
            shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)

            return shrinked_path

        def __reduce__(self):
            # because we have decided to nest these classes, we need to
            # add some more information to allow instance pickling.
            import matplotlib.cbook as cbook
            return (cbook._NestedClassGetter(),
                    (ConnectionStyle, self.__class__.__name__),
                    self.__dict__
                    )

    class Arc3(_Base):
        """
        Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle control points
        (C1) is located at the same distance from the start (C0) and
        end points(C2) and the distance of the C1 to the line
        connecting C0-C2 is *rad* times the distance of C0-C2.
        """

        def __init__(self, rad=0.):
            """
            *rad*
              curvature of the curve.
            """
            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB
            # Midpoint of the segment, offset perpendicular to it by
            # rad * |AB| to form the bezier control point.
            x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
            dx, dy = x2 - x1, y2 - y1

            f = self.rad

            cx, cy = x12 + f * dy, y12 - f * dx

            vertices = [(x1, y1),
                        (cx, cy),
                        (x2, y2)]
            codes = [Path.MOVETO,
                     Path.CURVE3,
                     Path.CURVE3]

            return Path(vertices, codes)

    _style_list["arc3"] = Arc3

    class Angle3(_Base):
        """
        Creates a simple quadratic bezier curve between two
        points. The middle control points is placed at the
        intersecting point of two lines which crosses the start (or
        end) point and has a angle of angleA (or angleB).
        """

        def __init__(self, angleA=90, angleB=0):
            """
            *angleA*
              starting angle of the path

            *angleB*
              ending angle of the path
            """

            self.angleA = angleA
            self.angleB = angleB

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
                math.sin(self.angleA / 180. * math.pi),
            cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
                math.sin(self.angleB / 180. * math.pi),

            # Control point at the intersection of the two direction rays.
            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1), (cx, cy), (x2, y2)]
            codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]

            return Path(vertices, codes)

    _style_list["angle3"] = Angle3

    class Angle(_Base):
        """
        Creates a piecewise continuous quadratic bezier path between
        two points. The path has a one passing-through point placed at
        the intersecting point of two lines which crosses the start
        (or end) point and has a angle of angleA (or angleB). The
        connecting edges are rounded with *rad*.
        """

        def __init__(self, angleA=90, angleB=0, rad=0.):
            """
            *angleA*
              starting angle of the path

            *angleB*
              ending angle of the path

            *rad*
              rounding radius of the edge
            """

            self.angleA = angleA
            self.angleB = angleB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
                math.sin(self.angleA / 180. * math.pi),
            cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
                math.sin(self.angleB / 180. * math.pi),

            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1)]
            codes = [Path.MOVETO]

            if self.rad == 0.:
                # Sharp corner: just go through the intersection point.
                vertices.append((cx, cy))
                codes.append(Path.LINETO)
            else:
                # Rounded corner: approach the corner along each leg to a
                # distance of rad, bridging with a quadratic bezier whose
                # control point is the corner itself.
                dx1, dy1 = x1 - cx, y1 - cy
                d1 = (dx1 ** 2 + dy1 ** 2) ** .5
                f1 = self.rad / d1
                dx2, dy2 = x2 - cx, y2 - cy
                d2 = (dx2 ** 2 + dy2 ** 2) ** .5
                f2 = self.rad / d2
                vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
                                 (cx, cy),
                                 (cx + dx2 * f2, cy + dy2 * f2)])
                codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    _style_list["angle"] = Angle

    class Arc(_Base):
        """
        Creates a piecewise continuous quadratic bezier path between
        two points. The path can have two passing-through points, a
        point placed at the distance of armA and angle of angleA from
        point A, another point with respect to point B. The edges are
        rounded with *rad*.
        """

        def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
            """
            *angleA* :
              starting angle of the path

            *angleB* :
              ending angle of the path

            *armA* :
              length of the starting arm

            *armB* :
              length of the ending arm

            *rad* :
              rounding radius of the edges
            """

            self.angleA = angleA
            self.angleB = angleB
            self.armA = armA
            self.armB = armB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            vertices = [(x1, y1)]
            rounded = []
            codes = [Path.MOVETO]

            if self.armA:
                cosA = math.cos(self.angleA / 180. * math.pi)
                sinA = math.sin(self.angleA / 180. * math.pi)
                # Two points along arm A: one rad short of the arm end
                # (line segment end) and the arm end itself (corner to be
                # rounded).
                d = self.armA - self.rad
                rounded.append((x1 + d * cosA, y1 + d * sinA))
                d = self.armA
                rounded.append((x1 + d * cosA, y1 + d * sinA))

            if self.armB:
                cosB = math.cos(self.angleB / 180. * math.pi)
                sinB = math.sin(self.angleB / 180. * math.pi)
                x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB

                if rounded:
                    # Round the corner at the end of arm A toward arm B.
                    xp, yp = rounded[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx * dx + dy * dy) ** .5

                    rounded.append((xp + self.rad * dx / dd,
                                    yp + self.rad * dy / dd))
                    vertices.extend(rounded)
                    codes.extend([Path.LINETO,
                                  Path.CURVE3,
                                  Path.CURVE3])
                else:
                    # No arm A: start the rounded segment rad short of the
                    # arm B end point.
                    xp, yp = vertices[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx * dx + dy * dy) ** .5
                    d = dd - self.rad
                    rounded = [(xp + d * dx / dd, yp + d * dy / dd),
                               (x_armB, y_armB)]

            if rounded:
                # Round the final corner toward the destination point.
                xp, yp = rounded[-1]
                dx, dy = x2 - xp, y2 - yp
                dd = (dx * dx + dy * dy) ** .5

                rounded.append((xp + self.rad * dx / dd,
                                yp + self.rad * dy / dd))
                vertices.extend(rounded)
                codes.extend([Path.LINETO,
                              Path.CURVE3,
                              Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    _style_list["arc"] = Arc

    class Bar(_Base):
        """
        A line with *angle* between A and B with *armA* and
        *armB*. One of the arm is extend so that they are connected in
        a right angle. The length of armA is determined by (*armA*
        + *fraction* x AB distance). Same for armB.
        """

        def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
            """
            *armA* : minimum length of armA

            *armB* : minimum length of armB

            *fraction* : a fraction of the distance between two points that
              will be added to armA and armB.

            *angle* : angle of the connecting line (if None, parallel to A
              and B)
            """
            self.armA = armA
            self.armB = armB
            self.fraction = fraction
            self.angle = angle

        def connect(self, posA, posB):
            x1, y1 = posA
            x20, y20 = x2, y2 = posB

            x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.

            theta1 = math.atan2(y2 - y1, x2 - x1)
            dx, dy = x2 - x1, y2 - y1
            dd = (dx * dx + dy * dy) ** .5
            ddx, ddy = dx / dd, dy / dd

            armA, armB = self.armA, self.armB

            if self.angle is not None:
                # Project B onto the line through A at the requested
                # angle, and shorten armB by the lateral offset so the
                # arms still meet at a right angle.
                #angle = self.angle % 180.
                #if angle < 0. or angle > 180.:
                #    angle
                #theta0 = (self.angle%180.)/180.*math.pi
                theta0 = self.angle / 180. * math.pi
                #theta0 = (((self.angle+90)%180.)  - 90.)/180.*math.pi
                dtheta = theta1 - theta0
                dl = dd * math.sin(dtheta)

                dL = dd * math.cos(dtheta)

                #x2, y2 = x2 + dl*ddy, y2 - dl*ddx
                x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)

                armB = armB - dl

                # update
                dx, dy = x2 - x1, y2 - y1
                dd2 = (dx * dx + dy * dy) ** .5
                ddx, ddy = dx / dd2, dy / dd2
            else:
                dl = 0.

            #if armA > armB:
            #    armB = armA + dl
            #else:
            #    armA = armB - dl

            arm = max(armA, armB)
            f = self.fraction * dd + arm
            #fB = self.fraction*dd + armB

            # Offset both ends perpendicular to AB by f to form the bar.
            cx1, cy1 = x1 + f * ddy, y1 - f * ddx
            cx2, cy2 = x2 + f * ddy, y2 - f * ddx

            vertices = [(x1, y1),
                        (cx1, cy1),
                        (cx2, cy2),
                        (x20, y20)]
            codes = [Path.MOVETO,
                     Path.LINETO,
                     Path.LINETO,
                     Path.LINETO]

            return Path(vertices, codes)

    _style_list["bar"] = Bar

    if __doc__:
        __doc__ = cbook.dedent(__doc__) % \
            {"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
    """
    find a point along a line connecting (x0, y0) -- (x1, y1) whose
    distance from (x0, y0) is d.
    """
    # Direction vector from (x0, y0) toward (x1, y1), scaled so its
    # length equals d, then added to the start point.
    vx, vy = x1 - x0, y1 - y0
    length = (vx * vx + vy * vy) ** .5
    scale = d / length
    return x0 + scale * vx, y0 + scale * vy
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which is used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
A arrowstyle object can be either created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is an callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along witch the arrow
will be drawn. *mutation_size* and *aspect_ratio* has a same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating the path is open therefore is not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
assert len(segments) == 2
assert segments[0][1] == Path.MOVETO
assert segments[1][1] == Path.CURVE3
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is a very core of the ArrowStyle
class and must be overriden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the amount arrow head and
etc. will be scaled. The linewidth may be used to adjust
the the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
boolean. The boolean value indicate whether the path can
be filled or not. The return value can also be a list of paths
and list of booleans of a same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and take care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrinked = Path(vertices, codes)
# call transmute method with squeezed height.
path_mutated, fillable = self.transmute(path_shrinked,
linewidth,
mutation_size)
if cbook.iterable(fillable):
path_list = []
for p in zip(path_mutated):
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
# because we have decided to nest thes classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
    """
    A simple arrow which will work with any path instance. The
    returned path is simply concatenation of the original path + at
    most two paths representing the arrow head at the begin point and
    at the end point. The arrow heads can be either open or closed.
    """

    def __init__(self, beginarrow=None, endarrow=None,
                 fillbegin=False, fillend=False,
                 head_length=.2, head_width=.1):
        """
        The arrows are drawn if *beginarrow* and/or *endarrow* are
        true. *head_length* and *head_width* determine the size
        of the arrow relative to the *mutation scale*. The
        arrowhead at the begin (or end) is closed if fillbegin (or
        fillend) is True.
        """
        self.beginarrow, self.endarrow = beginarrow, endarrow
        self.head_length, self.head_width = \
            head_length, head_width
        self.fillbegin, self.fillend = fillbegin, fillend
        super(ArrowStyle._Curve, self).__init__()

    def _get_arrow_wedge(self, x0, y0, x1, y1,
                         head_dist, cos_t, sin_t, linewidth
                         ):
        """
        Return the paths for arrow heads. Since arrow lines are
        drawn with capstyle=projected, the arrow goes beyond the
        desired point. This method also returns the amount of the path
        to be shrunken so that it does not overshoot.
        """

        # arrow from x0, y0 to x1, y1
        dx, dy = x0 - x1, y0 - y1

        cp_distance = math.sqrt(dx ** 2 + dy ** 2)
        # Guard against a zero-length segment (coincident points),
        # which would otherwise raise ZeroDivisionError below; the
        # wedge is degenerate in that case anyway.
        if cp_distance == 0:
            cp_distance = 1

        # pad_projected : amount of pad to account for the
        # overshooting of the projection of the wedge
        pad_projected = (.5 * linewidth / sin_t)

        # apply pad for projected edge
        ddx = pad_projected * dx / cp_distance
        ddy = pad_projected * dy / cp_distance

        # offset for arrow wedge
        dx = dx / cp_distance * head_dist
        dy = dy / cp_distance * head_dist

        # Rotate the head vector by +/- the head half-angle to get the
        # two wedge arms.
        dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
        dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy

        vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
                          (x1 + ddx, y1 + ddy),
                          (x1 + ddx + dx2, y1 + ddy + dy2)]
        codes_arrow = [Path.MOVETO,
                       Path.LINETO,
                       Path.LINETO]

        return vertices_arrow, codes_arrow, ddx, ddy

    def transmute(self, path, mutation_size, linewidth):

        head_length, head_width = self.head_length * mutation_size, \
            self.head_width * mutation_size
        head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
        cos_t, sin_t = head_length / head_dist, head_width / head_dist

        # begin arrow
        x0, y0 = path.vertices[0]
        x1, y1 = path.vertices[1]

        if self.beginarrow:
            verticesA, codesA, ddxA, ddyA = \
                self._get_arrow_wedge(x1, y1, x0, y0,
                                      head_dist, cos_t, sin_t,
                                      linewidth)
        else:
            verticesA, codesA = [], []
            ddxA, ddyA = 0., 0.

        # end arrow
        x2, y2 = path.vertices[-2]
        x3, y3 = path.vertices[-1]

        if self.endarrow:
            verticesB, codesB, ddxB, ddyB = \
                self._get_arrow_wedge(x2, y2, x3, y3,
                                      head_dist, cos_t, sin_t,
                                      linewidth)
        else:
            verticesB, codesB = [], []
            ddxB, ddyB = 0., 0.

        # This simple code will not work if ddx, ddy is greater than the
        # separation between vertices.
        _path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
                                      path.vertices[1:-1],
                                      [(x3 + ddxB, y3 + ddyB)]]),
                      path.codes)]
        _fillable = [False]

        if self.beginarrow:
            if self.fillbegin:
                # Close the head triangle so it can be filled.
                p = np.concatenate([verticesA, [verticesA[0],
                                                verticesA[0]], ])
                c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
                _path.append(Path(p, c))
                _fillable.append(True)
            else:
                _path.append(Path(verticesA, codesA))
                _fillable.append(False)

        if self.endarrow:
            if self.fillend:
                _fillable.append(True)
                p = np.concatenate([verticesB, [verticesB[0],
                                                verticesB[0]], ])
                c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
                _path.append(Path(p, c))
            else:
                _fillable.append(False)
                _path.append(Path(verticesB, codesB))

        return _path, _fillable
class Curve(_Curve):
    """
    A plain curve, drawn with no arrow head at either end.
    """

    def __init__(self):
        # Neither end carries a head; every other _Curve default applies.
        super(ArrowStyle.Curve, self).__init__(endarrow=False,
                                               beginarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
    """
    An arrow with a single open head drawn at the begin point.
    """

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head
        """
        super(ArrowStyle.CurveA, self).__init__(
            endarrow=False, beginarrow=True,
            head_width=head_width, head_length=head_length)
_style_list["<-"] = CurveA
class CurveB(_Curve):
    """
    An arrow with a single open head drawn at the end point.
    """

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head
        """
        super(ArrowStyle.CurveB, self).__init__(
            endarrow=True, beginarrow=False,
            head_width=head_width, head_length=head_length)
_style_list["->"] = CurveB
class CurveAB(_Curve):
    """
    An arrow with open heads at both the begin and the end point.
    """

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head
        """
        super(ArrowStyle.CurveAB, self).__init__(
            endarrow=True, beginarrow=True,
            head_width=head_width, head_length=head_length)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
    """
    An arrow with a filled triangular head at the begin point.
    """

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head
        """
        super(ArrowStyle.CurveFilledA, self).__init__(
            beginarrow=True, fillbegin=True,
            endarrow=False, fillend=False,
            head_width=head_width, head_length=head_length)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
    """
    An arrow with a filled triangular head at the end point.
    """

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head
        """
        super(ArrowStyle.CurveFilledB, self).__init__(
            beginarrow=False, fillbegin=False,
            endarrow=True, fillend=True,
            head_width=head_width, head_length=head_length)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
    """
    An arrow with filled triangular heads at both the begin and the
    end point.
    """

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head
        """
        super(ArrowStyle.CurveFilledAB, self).__init__(
            beginarrow=True, fillbegin=True,
            endarrow=True, fillend=True,
            head_width=head_width, head_length=head_length)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
    # Common base for bracket/bar arrow styles: draws an optional
    # square-bracket shape at either end of the path.

    def __init__(self, bracketA=None, bracketB=None,
                 widthA=1., widthB=1.,
                 lengthA=0.2, lengthB=0.2,
                 angleA=None, angleB=None,
                 scaleA=None, scaleB=None
                 ):
        # bracketA/bracketB: truthy to draw a bracket at the begin/end.
        # width*/length*: bracket geometry, multiplied by scale*
        # (scale* defaults to the mutation size in transmute()).
        # NOTE(review): angleA/angleB are stored but never used in
        # transmute() below -- presumably intended to tilt the
        # bracket relative to the line; confirm before relying on them.
        self.bracketA, self.bracketB = bracketA, bracketB
        self.widthA, self.widthB = widthA, widthB
        self.lengthA, self.lengthB = lengthA, lengthB
        self.angleA, self.angleB = angleA, angleB
        self.scaleA, self.scaleB = scaleA, scaleB

    def _get_bracket(self, x0, y0,
                     cos_t, sin_t, width, length,
                     ):
        """
        Return (vertices, codes) for one bracket centered at (x0, y0)
        and oriented along the unit vector (cos_t, sin_t).
        """

        # arrow from x0, y0 to x1, y1
        from matplotlib.bezier import get_normal_points
        # Endpoints of the bracket bar, offset perpendicular to the line.
        x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)

        dx, dy = length * cos_t, length * sin_t

        vertices_arrow = [(x1 + dx, y1 + dy),
                          (x1, y1),
                          (x2, y2),
                          (x2 + dx, y2 + dy)]
        codes_arrow = [Path.MOVETO,
                       Path.LINETO,
                       Path.LINETO,
                       Path.LINETO]

        return vertices_arrow, codes_arrow

    def transmute(self, path, mutation_size, linewidth):
        # Each bracket scale defaults to the mutation size unless
        # given explicitly.
        if self.scaleA is None:
            scaleA = mutation_size
        else:
            scaleA = self.scaleA

        if self.scaleB is None:
            scaleB = mutation_size
        else:
            scaleB = self.scaleB

        vertices_list, codes_list = [], []

        if self.bracketA:
            x0, y0 = path.vertices[0]
            x1, y1 = path.vertices[1]
            # Direction of the first segment, pointing into the path.
            cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
            verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
                                                  self.widthA * scaleA,
                                                  self.lengthA * scaleA)
            vertices_list.append(verticesA)
            codes_list.append(codesA)

        vertices_list.append(path.vertices)
        codes_list.append(path.codes)

        if self.bracketB:
            x0, y0 = path.vertices[-1]
            x1, y1 = path.vertices[-2]
            cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
            verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
                                                  self.widthB * scaleB,
                                                  self.lengthB * scaleB)
            vertices_list.append(verticesB)
            codes_list.append(codesB)

        # Concatenate brackets and original path into one Path; the
        # result is stroked only (not fillable).
        vertices = np.concatenate(vertices_list)
        codes = np.concatenate(codes_list)

        p = Path(vertices, codes)

        return p, False
class BracketAB(_Bracket):
    """
    An arrow drawn with a square bracket (]) at both ends.
    """

    def __init__(self,
                 widthA=1., lengthA=0.2, angleA=None,
                 widthB=1., lengthB=0.2, angleB=None):
        """
        *widthA*
          width of the bracket

        *lengthA*
          length of the bracket

        *angleA*
          angle between the bracket and the line

        *widthB*
          width of the bracket

        *lengthB*
          length of the bracket

        *angleB*
          angle between the bracket and the line
        """
        super(ArrowStyle.BracketAB, self).__init__(
            bracketA=True, bracketB=True,
            widthA=widthA, lengthA=lengthA, angleA=angleA,
            widthB=widthB, lengthB=lengthB, angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
    """
    An arrow with a square bracket (]) at its begin point.
    """

    def __init__(self, widthA=1., lengthA=0.2, angleA=None):
        """
        *widthA*
          width of the bracket

        *lengthA*
          length of the bracket

        *angleA*
          angle between the bracket and the line
        """
        super(ArrowStyle.BracketA, self).__init__(
            bracketA=True, bracketB=None,
            widthA=widthA, lengthA=lengthA, angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
    """
    An arrow with a square bracket ([) at its end point.
    """

    def __init__(self, widthB=1., lengthB=0.2, angleB=None):
        """
        *widthB*
          width of the bracket

        *lengthB*
          length of the bracket

        *angleB*
          angle between the bracket and the line
        """
        super(ArrowStyle.BracketB, self).__init__(
            bracketA=None, bracketB=True,
            widthB=widthB, lengthB=lengthB, angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
    """
    An arrow with a vertical bar (|) at both ends.
    """

    def __init__(self,
                 widthA=1., angleA=None,
                 widthB=1., angleB=None):
        """
        *widthA*
          width of the bar

        *angleA*
          angle between the bar and the line

        *widthB*
          width of the bar

        *angleB*
          angle between the bar and the line
        """
        # A bar is simply a zero-length bracket.
        super(ArrowStyle.BarAB, self).__init__(
            bracketA=True, bracketB=True,
            widthA=widthA, lengthA=0, angleA=angleA,
            widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
    """
    A simple arrow. Only works with a quadratic bezier curve.
    """

    def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head

        *tail_width*
          width of the arrow tail
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super(ArrowStyle.Simple, self).__init__()

    def transmute(self, path, mutation_size, linewidth):

        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        # divide the path into a head and a tail: split where the
        # curve enters a circle of radius head_length around the tip.
        head_length = self.head_length * mutation_size
        in_f = inside_circle(x2, y2, head_length)
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

        from .bezier import NonIntersectingPathException

        try:
            arrow_out, arrow_in = \
                split_bezier_intersecting_with_closedpath(arrow_path,
                                                          in_f,
                                                          tolerence=0.01)
        except NonIntersectingPathException:
            # if this happens (whole curve inside the head circle),
            # make a straight line of the head_length long.
            # NOTE: x0, y0 are deliberately rebound here.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
            arrow_out = None

        # head: widen the head segment into two bezier edges.
        head_width = self.head_width * mutation_size
        head_left, head_right = \
            make_wedged_bezier2(arrow_in, head_width / 2.,
                                wm=.5)

        # tail
        if arrow_out is not None:
            tail_width = self.tail_width * mutation_size
            tail_left, tail_right = get_parallels(arrow_out,
                                                  tail_width / 2.)

            #head_right, head_left = head_r, head_l
            patch_path = [(Path.MOVETO, tail_right[0]),
                          (Path.CURVE3, tail_right[1]),
                          (Path.CURVE3, tail_right[2]),
                          (Path.LINETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.LINETO, tail_left[2]),
                          (Path.CURVE3, tail_left[1]),
                          (Path.CURVE3, tail_left[0]),
                          (Path.LINETO, tail_right[0]),
                          (Path.CLOSEPOLY, tail_right[0]),
                          ]
        else:
            # The head consumed the whole path: draw the head only.
            patch_path = [(Path.MOVETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.CLOSEPOLY, head_left[0]),
                          ]

        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
    """
    A fancy arrow. Only works with a quadratic bezier curve.
    """

    def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head

        *tail_width*
          width of the arrow tail
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super(ArrowStyle.Fancy, self).__init__()

    def transmute(self, path, mutation_size, linewidth):

        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        # divide the path into a head and a tail
        head_length = self.head_length * mutation_size
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

        from .bezier import NonIntersectingPathException

        # path for head: split where the curve enters a circle of
        # radius head_length around the tip.
        in_f = inside_circle(x2, y2, head_length)
        try:
            path_out, path_in = \
                split_bezier_intersecting_with_closedpath(
                    arrow_path,
                    in_f,
                    tolerence=0.01)
        except NonIntersectingPathException:
            # if this happens, make a straight line of the head_length
            # long.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
            path_head = arrow_path
        else:
            path_head = path_in

        # path for tail: everything outside a slightly smaller circle,
        # so head and tail overlap a little.
        in_f = inside_circle(x2, y2, head_length * .8)
        path_out, path_in = \
            split_bezier_intersecting_with_closedpath(
                arrow_path,
                in_f,
                tolerence=0.01)
        path_tail = path_out

        # head
        head_width = self.head_width * mutation_size
        head_l, head_r = make_wedged_bezier2(path_head,
                                             head_width / 2.,
                                             wm=.6)

        # tail
        tail_width = self.tail_width * mutation_size
        tail_left, tail_right = make_wedged_bezier2(path_tail,
                                                    tail_width * .5,
                                                    w1=1., wm=0.6, w2=0.3)

        # tail start: point where the curve leaves a small circle
        # around the begin point.
        in_f = inside_circle(x0, y0, tail_width * .3)
        path_in, path_out = \
            split_bezier_intersecting_with_closedpath(
                arrow_path,
                in_f,
                tolerence=0.01)
        tail_start = path_in[-1]

        head_right, head_left = head_r, head_l
        patch_path = [(Path.MOVETO, tail_start),
                      (Path.LINETO, tail_right[0]),
                      (Path.CURVE3, tail_right[1]),
                      (Path.CURVE3, tail_right[2]),
                      (Path.LINETO, head_right[0]),
                      (Path.CURVE3, head_right[1]),
                      (Path.CURVE3, head_right[2]),
                      (Path.CURVE3, head_left[1]),
                      (Path.CURVE3, head_left[0]),
                      (Path.LINETO, tail_left[2]),
                      (Path.CURVE3, tail_left[1]),
                      (Path.CURVE3, tail_left[0]),
                      (Path.LINETO, tail_start),
                      (Path.CLOSEPOLY, tail_start),
                      ]

        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
    """
    Wedge(?) shape. Only works with a quadratic bezier curve. The
    begin point has a width of the tail_width and the end point has a
    width of 0. At the middle, the width is shrink_factor*tail_width.
    """

    def __init__(self, tail_width=.3, shrink_factor=0.5):
        """
        *tail_width*
          width of the tail

        *shrink_factor*
          fraction of the arrow width at the middle point
        """
        self.tail_width = tail_width
        self.shrink_factor = shrink_factor
        super(ArrowStyle.Wedge, self).__init__()

    def transmute(self, path, mutation_size, linewidth):

        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
        # Widen the bezier asymmetrically: full width at the start,
        # shrink_factor fraction at the middle, zero at the tip.
        b_plus, b_minus = make_wedged_bezier2(
            arrow_path,
            self.tail_width * mutation_size / 2.,
            wm=self.shrink_factor)

        patch_path = [(Path.MOVETO, b_plus[0]),
                      (Path.CURVE3, b_plus[1]),
                      (Path.CURVE3, b_plus[2]),
                      (Path.LINETO, b_minus[2]),
                      (Path.CURVE3, b_minus[1]),
                      (Path.CURVE3, b_minus[0]),
                      (Path.CLOSEPOLY, b_minus[0]),
                      ]

        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True
_style_list["wedge"] = Wedge
# Under ``python -OO`` docstrings are stripped and __doc__ is None,
# so only interpolate the style table into the class docstring when
# a docstring actually exists.
if __doc__:
    __doc__ = cbook.dedent(__doc__) % \
        {"AvailableArrowstyles": _pprint_styles(_style_list)}
# Register the style tables for docstring interpolation, so markers
# like %(AvailableArrowstyles)s in later docstrings get expanded.
docstring.interpd.update(
    AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
    AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
class FancyArrowPatch(Patch):
    """
    A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
    """

    def __str__(self):

        if self._posA_posB is not None:
            (x1, y1), (x2, y2) = self._posA_posB
            return self.__class__.__name__ \
                + "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
        else:
            return self.__class__.__name__ \
                + "(%s)" % (str(self._path_original),)

    @docstring.dedent_interpd
    def __init__(self, posA=None, posB=None,
                 path=None,
                 arrowstyle="simple",
                 arrow_transmuter=None,
                 connectionstyle="arc3",
                 connector=None,
                 patchA=None,
                 patchB=None,
                 shrinkA=2.,
                 shrinkB=2.,
                 mutation_scale=1.,
                 mutation_aspect=None,
                 dpi_cor=1.,
                 **kwargs):
        """
        If *posA* and *posB* are given, a path connecting two points is
        created according to the connectionstyle. The path will be
        clipped with *patchA* and *patchB* and further shrunken by
        *shrinkA* and *shrinkB*. An arrow is drawn along this
        resulting path using the *arrowstyle* parameter. If *path*
        is provided, an arrow is drawn along this path and *patchA*,
        *patchB*, *shrinkA*, and *shrinkB* are ignored.

        The *connectionstyle* describes how *posA* and *posB* are
        connected. It can be an instance of the ConnectionStyle class
        (matplotlib.patches.ConnectionStyle) or a string of the
        connectionstyle name, with optional comma-separated
        attributes. The following connection styles are available.

        %(AvailableConnectorstyles)s

        The *arrowstyle* describes how the fancy arrow will be
        drawn. It can be a string of the available arrowstyle names,
        with optional comma-separated attributes, or an
        ArrowStyle instance. The optional attributes are meant to be
        scaled with the *mutation_scale*. The following arrow styles are
        available.

        %(AvailableArrowstyles)s

        *mutation_scale* : a value with which attributes of arrowstyle
            (e.g., head_length) will be scaled. default=1.

        *mutation_aspect* : The height of the rectangle will be
            squeezed by this value before the mutation and the mutated
            box will be stretched by the inverse of it. default=None.

        Valid kwargs are:
        %(Patch)s
        """

        if posA is not None and posB is not None and path is None:
            self._posA_posB = [posA, posB]

            if connectionstyle is None:
                connectionstyle = "arc3"
            self.set_connectionstyle(connectionstyle)

        elif posA is None and posB is None and path is not None:
            self._posA_posB = None
            # No connection style is used when an explicit path is given.
            # (Fixed: this previously assigned to a misspelled
            # ``_connetors`` attribute, leaving ``_connector`` unset and
            # making get_connectionstyle() raise AttributeError.)
            self._connector = None
        else:
            raise ValueError("either posA and posB, or path need to provided")

        self.patchA = patchA
        self.patchB = patchB
        self.shrinkA = shrinkA
        self.shrinkB = shrinkB

        Patch.__init__(self, **kwargs)

        self._path_original = path

        self.set_arrowstyle(arrowstyle)

        self._mutation_scale = mutation_scale
        self._mutation_aspect = mutation_aspect

        self.set_dpi_cor(dpi_cor)
        #self._draw_in_display_coordinate = True

    def set_dpi_cor(self, dpi_cor):
        """
        dpi_cor is currently used for linewidth-related things and
        shrink factor. Mutation scale is not affected by this.
        """

        self._dpi_cor = dpi_cor

    def get_dpi_cor(self):
        """
        dpi_cor is currently used for linewidth-related things and
        shrink factor. Mutation scale is not affected by this.
        """

        return self._dpi_cor

    def set_positions(self, posA, posB):
        """ set the begin and end positions of the connecting
        path. Use current value if None.
        """
        if posA is not None:
            self._posA_posB[0] = posA
        if posB is not None:
            self._posA_posB[1] = posB

    def set_patchA(self, patchA):
        """ set the begin patch.
        """
        self.patchA = patchA

    def set_patchB(self, patchB):
        """ set the end patch.
        """
        self.patchB = patchB

    def set_connectionstyle(self, connectionstyle, **kw):
        """
        Set the connection style.

        *connectionstyle* can be a string with connectionstyle name with
        optional comma-separated attributes. Alternatively, the attrs can be
        provided as keywords.

         set_connectionstyle("arc,angleA=0,armA=30,rad=10")
         set_connectionstyle("arc", angleA=0,armA=30,rad=10)

        Old attrs simply are forgotten.

        Without argument (or with connectionstyle=None), return
        available styles as a list of strings.
        """

        if connectionstyle is None:
            return ConnectionStyle.pprint_styles()

        if isinstance(connectionstyle, ConnectionStyle._Base):
            self._connector = connectionstyle
        elif six.callable(connectionstyle):
            # we may need check the calling convention of the given function
            self._connector = connectionstyle
        else:
            self._connector = ConnectionStyle(connectionstyle, **kw)

    def get_connectionstyle(self):
        """
        Return the ConnectionStyle instance
        """
        return self._connector

    def set_arrowstyle(self, arrowstyle=None, **kw):
        """
        Set the arrow style.

        *arrowstyle* can be a string with arrowstyle name with optional
        comma-separated attributes. Alternatively, the attrs can
        be provided as keywords.

         set_arrowstyle("Fancy,head_length=0.2")
         set_arrowstyle("fancy", head_length=0.2)

        Old attrs simply are forgotten.

        Without argument (or with arrowstyle=None), return
        available box styles as a list of strings.
        """

        if arrowstyle is None:
            return ArrowStyle.pprint_styles()

        if isinstance(arrowstyle, ArrowStyle._Base):
            self._arrow_transmuter = arrowstyle
        else:
            self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)

    def get_arrowstyle(self):
        """
        Return the arrowstyle object
        """
        return self._arrow_transmuter

    def set_mutation_scale(self, scale):
        """
        Set the mutation scale.

        ACCEPTS: float
        """
        self._mutation_scale = scale

    def get_mutation_scale(self):
        """
        Return the mutation scale.
        """
        return self._mutation_scale

    def set_mutation_aspect(self, aspect):
        """
        Set the aspect ratio of the bbox mutation.

        ACCEPTS: float
        """
        self._mutation_aspect = aspect

    def get_mutation_aspect(self):
        """
        Return the aspect ratio of the bbox mutation.
        """
        return self._mutation_aspect

    def get_path(self):
        """
        return the path of the arrow in the data coordinate. Use
        get_path_in_displaycoord() method to retrieve the arrow path
        in the display coord.
        """
        _path, fillable = self.get_path_in_displaycoord()

        if cbook.iterable(fillable):
            _path = concatenate_paths(_path)

        return self.get_transform().inverted().transform_path(_path)

    def get_path_in_displaycoord(self):
        """
        Return the mutated path of the arrow in the display coord
        """

        dpi_cor = self.get_dpi_cor()

        if self._posA_posB is not None:
            posA = self.get_transform().transform_point(self._posA_posB[0])
            posB = self.get_transform().transform_point(self._posA_posB[1])
            _path = self.get_connectionstyle()(posA, posB,
                                               patchA=self.patchA,
                                               patchB=self.patchB,
                                               shrinkA=self.shrinkA * dpi_cor,
                                               shrinkB=self.shrinkB * dpi_cor
                                               )
        else:
            _path = self.get_transform().transform_path(self._path_original)

        _path, fillable = self.get_arrowstyle()(_path,
                                                self.get_mutation_scale(),
                                                self.get_linewidth() * dpi_cor,
                                                self.get_mutation_aspect()
                                                )

        #if not fillable:
        #    self._fill = False

        return _path, fillable

    def draw(self, renderer):
        if not self.get_visible():
            return

        renderer.open_group('patch', self.get_gid())
        gc = renderer.new_gc()

        gc.set_foreground(self._edgecolor, isRGBA=True)

        # A fully transparent edge is drawn with zero linewidth so the
        # stroke does not show through the face.
        lw = self._linewidth
        if self._edgecolor[3] == 0:
            lw = 0
        gc.set_linewidth(lw)
        gc.set_linestyle(self._linestyle)

        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)
        gc.set_capstyle('round')
        gc.set_snap(self.get_snap())

        rgbFace = self._facecolor
        if rgbFace[3] == 0:
            rgbFace = None  # (some?) renderers expect this as no-fill signal

        gc.set_alpha(self._alpha)

        if self._hatch:
            gc.set_hatch(self._hatch)

        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())

        # FIXME : dpi_cor is for the dpi-dependency of the
        # linewidth. There could be room for improvement.
        #
        #dpi_cor = renderer.points_to_pixels(1.)
        self.set_dpi_cor(renderer.points_to_pixels(1.))
        path, fillable = self.get_path_in_displaycoord()

        if not cbook.iterable(fillable):
            path = [path]
            fillable = [fillable]

        affine = transforms.IdentityTransform()

        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)

        for p, f in zip(path, fillable):
            if f:
                renderer.draw_path(gc, p, affine, rgbFace)
            else:
                renderer.draw_path(gc, p, affine, None)
        gc.restore()
        renderer.close_group('patch')
class ConnectionPatch(FancyArrowPatch):
    """
    A :class:`~matplotlib.patches.ConnectionPatch` class is to make
    connecting lines between two points (possibly in different axes).
    """
    def __str__(self):
        return "ConnectionPatch((%g,%g),(%g,%g))" % \
            (self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])

    @docstring.dedent_interpd
    def __init__(self, xyA, xyB, coordsA, coordsB=None,
                 axesA=None, axesB=None,
                 arrowstyle="-",
                 arrow_transmuter=None,
                 connectionstyle="arc3",
                 connector=None,
                 patchA=None,
                 patchB=None,
                 shrinkA=0.,
                 shrinkB=0.,
                 mutation_scale=10.,
                 mutation_aspect=None,
                 clip_on=False,
                 dpi_cor=1.,
                 **kwargs):
        """
        Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*

        Valid keys are

        ===============  ======================================================
        Key              Description
        ===============  ======================================================
        arrowstyle       the arrow style
        connectionstyle  the connection style
        relpos           default is (0.5, 0.5)
        patchA           default is bounding box of the text
        patchB           default is None
        shrinkA          default is 2 points
        shrinkB          default is 2 points
        mutation_scale   default is text size (in points)
        mutation_aspect  default is 1.
        ?                any key for :class:`matplotlib.patches.PathPatch`
        ===============  ======================================================

        *coordsA* and *coordsB* are strings that indicate the
        coordinates of *xyA* and *xyB*.

        =================   ===================================================
        Property            Description
        =================   ===================================================
        'figure points'     points from the lower left corner of the figure
        'figure pixels'     pixels from the lower left corner of the figure
        'figure fraction'   0,0 is lower left of figure and 1,1 is upper, right
        'axes points'       points from lower left corner of axes
        'axes pixels'       pixels from lower left corner of axes
        'axes fraction'     0,1 is lower left of axes and 1,1 is upper right
        'data'              use the coordinate system of the object being
                            annotated (default)
        'offset points'     Specify an offset (in points) from the *xy* value
        'polar'             you can specify *theta*, *r* for the annotation,
                            even in cartesian plots.  Note that if you
                            are using a polar axes, you do not need
                            to specify polar for the coordinate
                            system since that is the native "data" coordinate
                            system.
        =================   ===================================================

        """
        # *coordsB* defaults to the same coordinate system as *coordsA*.
        if coordsB is None:
            coordsB = coordsA
        # we'll draw ourself after the artist we annotate by default
        self.xy1 = xyA
        self.xy2 = xyB
        self.coords1 = coordsA
        self.coords2 = coordsB

        self.axesA = axesA
        self.axesB = axesB

        FancyArrowPatch.__init__(self,
                                 posA=(0, 0), posB=(1, 1),
                                 arrowstyle=arrowstyle,
                                 arrow_transmuter=arrow_transmuter,
                                 connectionstyle=connectionstyle,
                                 connector=connector,
                                 patchA=patchA,
                                 patchB=patchB,
                                 shrinkA=shrinkA,
                                 shrinkB=shrinkB,
                                 mutation_scale=mutation_scale,
                                 mutation_aspect=mutation_aspect,
                                 clip_on=clip_on,
                                 dpi_cor=dpi_cor,
                                 **kwargs)

        # if True, draw annotation only if self.xy is inside the axes
        self._annotation_clip = None

    def _get_xy(self, x, y, s, axes=None):
        """
        calculate the pixel position of given point

        NOTE(review): an unrecognized coordinate string *s* falls off
        the end of the if/elif chain and returns None -- confirm the
        callers only pass the documented coordinate names.
        """
        if axes is None:
            axes = self.axes

        if s == 'data':
            trans = axes.transData
            x = float(self.convert_xunits(x))
            y = float(self.convert_yunits(y))
            return trans.transform_point((x, y))
        elif s == 'offset points':
            # convert the data point
            dx, dy = self.xy

            # prevent recursion
            if self.xycoords == 'offset points':
                return self._get_xy(dx, dy, 'data')

            dx, dy = self._get_xy(dx, dy, self.xycoords)

            # convert the offset from points to pixels
            dpi = self.figure.get_dpi()
            x *= dpi / 72.
            y *= dpi / 72.

            # add the offset to the data point
            x += dx
            y += dy

            return x, y
        elif s == 'polar':
            theta, r = x, y
            x = r * np.cos(theta)
            y = r * np.sin(theta)
            trans = axes.transData
            return trans.transform_point((x, y))
        elif s == 'figure points':
            # points from the lower left corner of the figure
            dpi = self.figure.dpi
            l, b, w, h = self.figure.bbox.bounds
            r = l + w
            t = b + h

            x *= dpi / 72.
            y *= dpi / 72.
            # negative values are measured from the right/top edge
            if x < 0:
                x = r + x
            if y < 0:
                y = t + y
            return x, y
        elif s == 'figure pixels':
            # pixels from the lower left corner of the figure
            l, b, w, h = self.figure.bbox.bounds
            r = l + w
            t = b + h
            if x < 0:
                x = r + x
            if y < 0:
                y = t + y
            return x, y
        elif s == 'figure fraction':
            # (0,0) is lower left, (1,1) is upper right of figure
            trans = self.figure.transFigure
            return trans.transform_point((x, y))
        elif s == 'axes points':
            # points from the lower left corner of the axes
            dpi = self.figure.dpi
            l, b, w, h = axes.bbox.bounds
            r = l + w
            t = b + h
            if x < 0:
                x = r + x * dpi / 72.
            else:
                x = l + x * dpi / 72.
            if y < 0:
                y = t + y * dpi / 72.
            else:
                y = b + y * dpi / 72.
            return x, y
        elif s == 'axes pixels':
            # pixels from the lower left corner of the axes
            l, b, w, h = axes.bbox.bounds
            r = l + w
            t = b + h
            if x < 0:
                x = r + x
            else:
                x = l + x
            if y < 0:
                y = t + y
            else:
                y = b + y
            return x, y
        elif s == 'axes fraction':
            # (0,0) is lower left, (1,1) is upper right of axes
            trans = axes.transAxes
            return trans.transform_point((x, y))

    def set_annotation_clip(self, b):
        """
        set *annotation_clip* attribute.

        * True: the annotation will only be drawn when self.xy is inside the
                axes.
        * False: the annotation will always be drawn regardless of its
                 position.
        * None: the self.xy will be checked only if *xycoords* is "data"
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return *annotation_clip* attribute.
        See :meth:`set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def get_path_in_displaycoord(self):
        """
        Return the mutated path of the arrow in the display coord
        """

        dpi_cor = self.get_dpi_cor()

        x, y = self.xy1
        posA = self._get_xy(x, y, self.coords1, self.axesA)

        x, y = self.xy2
        posB = self._get_xy(x, y, self.coords2, self.axesB)

        _path = self.get_connectionstyle()(posA, posB,
                                           patchA=self.patchA,
                                           patchB=self.patchB,
                                           shrinkA=self.shrinkA * dpi_cor,
                                           shrinkB=self.shrinkB * dpi_cor
                                           )

        _path, fillable = self.get_arrowstyle()(_path,
                                                self.get_mutation_scale(),
                                                self.get_linewidth() * dpi_cor,
                                                self.get_mutation_aspect()
                                                )

        return _path, fillable

    def _check_xy(self, renderer):
        """
        check if the annotation needs to be drawn.

        NOTE(review): *renderer* is accepted but unused here.
        """

        b = self.get_annotation_clip()

        if b or (b is None and self.coords1 == "data"):
            x, y = self.xy1
            xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
            if not self.axes.contains_point(xy_pixel):
                return False

        if b or (b is None and self.coords2 == "data"):
            x, y = self.xy2
            xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
            if self.axesB is None:
                axes = self.axes
            else:
                axes = self.axesB
            if not axes.contains_point(xy_pixel):
                return False

        return True

    def draw(self, renderer):
        """
        Draw.
        """

        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return

        # Skip drawing entirely when annotation clipping applies.
        if not self._check_xy(renderer):
            return

        FancyArrowPatch.draw(self, renderer)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.