| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
lpeska/BRDTI | cmf.py | 1 | 4613 | '''
We base the CMF implementation on the one from the PyDTI project (https://github.com/stephenliu0423/PyDTI); changes were made to the evaluation procedure.
[1] X. Zheng, H. Ding, H. Mamitsuka, and S. Zhu, "Collaborative matrix factorization with multiple similarities for predicting drug-target interaction", KDD, 2013.
'''
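# Sketch of the objective (as implemented in compute_loss below), up to a
# factor of 1/2:
#   ||W * (Y - U V^T)||_F^2 + lambda_l * (||U||_F^2 + ||V||_F^2)
#     + lambda_d * ||S_d - U U^T||_F^2 + lambda_t * ||S_t - V V^T||_F^2
# where Y is the drug-target interaction matrix, W the observation mask, and
# S_d, S_t the drug and target similarity matrices. U and V are updated
# alternately with closed-form ridge-regularized least squares (als_update).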
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from functions import normalized_discounted_cummulative_gain
class CMF:
def __init__(self, K=10, lambda_l=0.01, lambda_d=0.01, lambda_t=0.01, max_iter=100):
self.K = K
self.lambda_l = lambda_l
self.lambda_d = lambda_d
self.lambda_t = lambda_t
self.max_iter = max_iter
def fix_model(self, W, intMat, drugMat, targetMat, seed):
self.num_drugs, self.num_targets = intMat.shape
self.drugMat, self.targetMat = drugMat, targetMat
x, y = np.where(W > 0)
self.train_drugs = set(x.tolist())
self.train_targets = set(y.tolist())
if seed is None:
self.U = np.sqrt(1/float(self.K))*np.random.normal(size=(self.num_drugs, self.K))
self.V = np.sqrt(1/float(self.K))*np.random.normal(size=(self.num_targets, self.K))
else:
prng = np.random.RandomState(seed)
self.U = np.sqrt(1/float(self.K))*prng.normal(size=(self.num_drugs, self.K))
self.V = np.sqrt(1/float(self.K))*prng.normal(size=(self.num_targets, self.K))
self.ones = np.identity(self.K)
last_loss = self.compute_loss(W, intMat, drugMat, targetMat)
WR = W*intMat
for t in xrange(self.max_iter):
self.U = self.als_update(self.U, self.V, W, WR, drugMat, self.lambda_l, self.lambda_d)
self.V = self.als_update(self.V, self.U, W.T, WR.T, targetMat, self.lambda_l, self.lambda_t)
curr_loss = self.compute_loss(W, intMat, drugMat, targetMat)
delta_loss = (curr_loss-last_loss)/last_loss
# print "Epoach:%s, Curr_loss:%s, Delta_loss:%s" % (t+1, curr_loss, delta_loss)
if abs(delta_loss) < 1e-6:
break
last_loss = curr_loss
def als_update(self, U, V, W, R, S, lambda_l, lambda_d):
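# Row-wise closed-form ALS update (descriptive note): for each row i with
# observed entries Omega_i = {j : W[i, j] > 0}, the new row is
#   U0[i] = X[i] * inv(V[Omega_i].T V[Omega_i] + 2*lambda_d*U.T U
#                      + (lambda_l + Z[i]) * I)
# with X = R V + 2*lambda_d*S U; the branches below only avoid forming the
# full diagonal weight matrix when a row is empty or fully observed.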
X = R.dot(V) + 2*lambda_d*S.dot(U)
Y = 2*lambda_d*np.dot(U.T, U)
Z = lambda_d*(np.diag(S)-np.sum(np.square(U), axis=1))
U0 = np.zeros(U.shape)
D = np.dot(V.T, V)
m, n = W.shape
for i in xrange(m):
# A = np.dot(V.T, np.diag(W[i, :]))
# B = A.dot(V) + Y + (lambda_l+Z[i])*self.ones
ii = np.where(W[i, :] > 0)[0]
if ii.size == 0:
B = Y + (lambda_l+Z[i])*self.ones
elif ii.size == n:
B = D + Y + (lambda_l+Z[i])*self.ones
else:
A = np.dot(V[ii, :].T, V[ii, :])
B = A + Y + (lambda_l+Z[i])*self.ones
U0[i, :] = X[i, :].dot(np.linalg.inv(B))
return U0
def compute_loss(self, W, intMat, drugMat, targetMat):
loss = np.linalg.norm(W * (intMat - np.dot(self.U, self.V.T)), "fro")**(2)
loss += self.lambda_l*(np.linalg.norm(self.U, "fro")**(2)+np.linalg.norm(self.V, "fro")**(2))
loss += self.lambda_d*np.linalg.norm(drugMat-self.U.dot(self.U.T), "fro")**(2)+self.lambda_t*np.linalg.norm(targetMat-self.V.dot(self.V.T), "fro")**(2)
return 0.5*loss
def evaluation(self, test_data, test_label):
ii, jj = test_data[:, 0], test_data[:, 1]
scores = np.sum(self.U[ii, :]*self.V[jj, :], axis=1)
self.scores = scores
x, y = test_data[:, 0], test_data[:, 1]
test_data_T = np.column_stack((y,x))
ndcg = normalized_discounted_cummulative_gain(test_data, test_label, np.array(scores))
ndcg_inv = normalized_discounted_cummulative_gain(test_data_T, test_label, np.array(scores))
prec, rec, thr = precision_recall_curve(test_label, scores)
aupr_val = auc(rec, prec)
fpr, tpr, thr = roc_curve(test_label, scores)
auc_val = auc(fpr, tpr)
# !!! we should distinguish here between the nDCG of inverted and non-inverted methods !!!
return aupr_val, auc_val, ndcg, ndcg_inv
def predict_scores(self, test_data, N):
inx = np.array(test_data)
return np.sum(self.U[inx[:, 0], :]*self.V[inx[:, 1], :], axis=1)
def __str__(self):
return "Model: CMF, K:%s, lambda_l:%s, lambda_d:%s, lambda_t:%s, max_iter:%s" % (self.K, self.lambda_l, self.lambda_d, self.lambda_t, self.max_iter)
| gpl-2.0 |
jniediek/mne-python | mne/tests/test_label.py | 3 | 33801 | import os
import os.path as op
import shutil
import glob
import warnings
import numpy as np
from scipy import sparse
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_connectivity,
read_surface)
from mne.label import Label, _blend_colors, label_sign_flip
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, slow_test)
from mne.fixes import assert_is, assert_is_not
from mne.label import _n_colors
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
from mne.externals.six import string_types
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
# XXX : this was added for backward compat to keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : int
Number of smoothing iterations.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
ordered in decreasing order depending on the maximum value in the stc.
If no Label is available in a hemisphere, an empty list is returned.
"""
src = stc.subject if src is None else src
if isinstance(src, string_types):
subject = src
else:
subject = stc.subject
if isinstance(src, string_types):
subjects_dir = get_subjects_dir(subjects_dir)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from,
'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from,
'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
labels = []
cnt = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
this_labels = None
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
for k in range(smooth):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
this_labels.append(label)
this_labels = this_labels[0]
labels.append(this_labels)
return labels
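# Example call (a sketch; test_stc_to_label below uses the same pattern):
#     labels = _stc_to_label(stc, src='sample', smooth=3)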
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
if comment:
assert_equal(l0.comment, l1.comment)
if color:
assert_equal(l0.color, l1.color)
for attr in ['hemi', 'subject']:
attr0 = getattr(l0, attr)
attr1 = getattr(l1, attr)
msg = "label.%s: %r != %r" % (attr, attr0, attr1)
assert_equal(attr0, attr1, msg)
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_copy():
"""Test label copying"""
label = read_label(label_fname)
label_2 = label.copy()
label_2.pos += 1
assert_array_equal(label.pos, label_2.pos - 1)
def test_label_subject():
"""Test label subject name extraction
"""
label = read_label(label_fname)
assert_is(label.subject, None)
assert_true('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert_true(label.subject == 'fsaverage')
assert_true('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition
"""
pos = np.random.RandomState(0).rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
assert_equal(len(l0), len(idx0))
l_good = l0.copy()
l_good.subject = 'sample'
l_bad = l1.copy()
l_bad.subject = 'foo'
assert_raises(ValueError, l_good.__add__, l_bad)
assert_raises(TypeError, l_good.__add__, 'foo')
assert_raises(ValueError, l_good.__sub__, l_bad)
assert_raises(TypeError, l_good.__sub__, 'foo')
# adding non-overlapping labels
l01 = l0 + l1
assert_equal(len(l01), len(l0) + len(l1))
assert_array_equal(l01.values[:len(l0)], l0.values)
assert_equal(l01.color, l0.color)
# subtraction
assert_labels_equal(l01 - l0, l1, comment=False, color=False)
assert_labels_equal(l01 - l1, l0, comment=False, color=False)
# adding overlapping labels
l = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l.vertices == 6)[0][0]
assert_equal(l.values[i], l0.values[i0] + l2.values[i2])
assert_equal(l.values[0], l0.values[0])
assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
assert_equal(l.color, _blend_colors(l0.color, l2.color))
# adding lh and rh
l2.hemi = 'rh'
# this now has deprecated behavior
bhl = l0 + l2
assert_equal(bhl.hemi, 'both')
assert_equal(len(bhl), len(l0) + len(l2))
assert_equal(bhl.color, l.color)
assert_true('BiHemiLabel' in repr(bhl))
# subtraction
assert_labels_equal(bhl - l0, l2)
assert_labels_equal(bhl - l2, l0)
bhl2 = l1 + bhl
assert_labels_equal(bhl2.lh, l01)
assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices) # rh label
assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
assert_raises(TypeError, bhl.__add__, 5)
# subtraction
bhl_ = bhl2 - l1
assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
assert_labels_equal(bhl_.rh, bhl.rh)
assert_labels_equal(bhl2 - l2, l0 + l1)
assert_labels_equal(bhl2 - l1 - l0, l2)
bhl_ = bhl2 - bhl2
assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
def test_label_in_src():
"""Test label in src"""
src = read_source_spaces(src_fname)
label = read_label(v1_label_fname)
# construct label from source space vertices
vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
where = np.in1d(label.vertices, vert_in_src)
pos_in_src = label.pos[where]
values_in_src = label.values[where]
label_src = Label(vert_in_src, pos_in_src, values_in_src,
hemi='lh').fill(src)
# check label vertices
vertices_status = np.in1d(src[0]['nearest'], label.vertices)
vertices_in = np.nonzero(vertices_status)[0]
vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
assert_array_equal(label_src.vertices, vertices_in)
assert_array_equal(np.in1d(vertices_out, label_src.vertices), False)
# check values
value_idx = np.digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
assert_array_equal(label_src.values, values_in_src[value_idx])
# test exception
vertices = np.append([-1], vert_in_src)
assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files
"""
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert_true(len(stc_label.times) == stc_label.data.shape[1])
assert_true(len(stc_label.vertices[0]) == stc_label.data.shape[0])
@testing.requires_testing_data
def test_label_io():
"""Test IO of label files
"""
tempdir = _TempDir()
label = read_label(label_fname)
# label attributes
assert_equal(label.name, 'test-lh')
assert_is(label.subject, None)
assert_is(label.color, None)
# save and reload
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Make sure two sets of labels are equal"""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert_true(label_a.name == label_b.name)
assert_true(label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
"""Test I/O from and to *.annot files"""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for l1, l in zip(parc1, parc):
assert_labels_equal(l1, l)
# test saving only one hemisphere
parc = [l for l in labels if l.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert_true(os.path.isfile(annot_fname % 'l'))
assert_false(os.path.isfile(annot_fname % 'r'))
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [l for l in parc if l.name.endswith('lh')]
for l1, l in zip(parc1, parc_lh):
assert_labels_equal(l1, l)
@testing.requires_testing_data
def test_read_labels_from_annot():
"""Test reading labels from FreeSurfer parcellation
"""
# test some invalid inputs
assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
subjects_dir=subjects_dir)
assert_raises(ValueError, read_labels_from_annot, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
# read labels using hemi specification
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
for label in labels_lh:
assert_true(label.name.endswith('-lh'))
assert_true(label.hemi == 'lh')
assert_is_not(label.color, None)
# read labels using annot_fname
annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
subjects_dir=subjects_dir)
for label in labels_rh:
assert_true(label.name.endswith('-rh'))
assert_true(label.hemi == 'rh')
assert_is_not(label.color, None)
# combine the lh, rh, labels and sort them
labels_lhrh = list()
labels_lhrh.extend(labels_lh)
labels_lhrh.extend(labels_rh)
names = [label.name for label in labels_lhrh]
labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
# read all labels at once
labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result
_assert_labels_equal(labels_lhrh, labels_both)
# aparc has 68 cortical labels
assert_true(len(labels_both) == 68)
# test regexp
label = read_labels_from_annot('sample', parc='aparc.a2009s',
regexp='Angu', subjects_dir=subjects_dir)[0]
assert_true(label.name == 'G_pariet_inf-Angular-lh')
# silly, but real regexp:
label = read_labels_from_annot('sample', 'aparc.a2009s',
regexp='.*-.{4,}_.{3,3}-L',
subjects_dir=subjects_dir)[0]
assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
assert_raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
annot_fname=annot_fname, regexp='JackTheRipper',
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_labels_from_annot_annot2labels():
"""Test reading labels from parc. by comparing with mne_annot2labels
"""
label_fnames = glob.glob(label_dir + '/*.label')
label_fnames.sort()
labels_mne = [read_label(fname) for fname in label_fnames]
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result, mne does not fill pos, so ignore it
_assert_labels_equal(labels, labels_mne, ignore_pos=True)
@testing.requires_testing_data
def test_write_labels_to_annot():
"""Test writing FreeSurfer parcellation from labels"""
tempdir = _TempDir()
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# create temporary subjects-dir skeleton
surf_dir = op.join(subjects_dir, 'sample', 'surf')
temp_surf_dir = op.join(tempdir, 'sample', 'surf')
os.makedirs(temp_surf_dir)
shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
os.makedirs(op.join(tempdir, 'sample', 'label'))
# test automatic filenames
dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test1')))
assert_true(op.exists(dst % ('rh', 'test1')))
# lh only
for label in labels:
if label.hemi == 'lh':
break
write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test2')))
assert_true(op.exists(dst % ('rh', 'test2')))
# rh only
for label in labels:
if label.hemi == 'rh':
break
write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test3')))
assert_true(op.exists(dst % ('rh', 'test3')))
# label alone
assert_raises(TypeError, write_labels_to_annot, labels[0], 'sample',
'test4', subjects_dir=tempdir)
# write left and right hemi labels with filenames:
fnames = [op.join(tempdir, hemi + '-myparc') for hemi in ['lh', 'rh']]
with warnings.catch_warnings(record=True): # specify subject_dir param
for fname in fnames:
write_labels_to_annot(labels, annot_fname=fname)
# read it back
labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels2.extend(labels22)
names = [label.name for label in labels2]
for label in labels:
idx = names.index(label.name)
assert_labels_equal(label, labels2[idx])
# same with label-internal colors
for fname in fnames:
write_labels_to_annot(labels, 'sample', annot_fname=fname,
overwrite=True, subjects_dir=subjects_dir)
labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels3.extend(labels33)
names3 = [label.name for label in labels3]
for label in labels:
idx = names3.index(label.name)
assert_labels_equal(label, labels3[idx])
# make sure we can't overwrite things
assert_raises(ValueError, write_labels_to_annot, labels, 'sample',
annot_fname=fnames[0], subjects_dir=subjects_dir)
# however, this works
write_labels_to_annot(labels, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# label without color
labels_ = labels[:]
labels_[0] = labels_[0].copy()
labels_[0].color = None
write_labels_to_annot(labels_, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# duplicate color
labels_[0].color = labels_[2].color
assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# invalid color inputs
labels_[0].color = (1.1, 1., 1., 1.)
assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# overlapping labels
labels_ = labels[:]
cuneus_lh = labels[6]
precuneus_lh = labels[50]
labels_.append(precuneus_lh + cuneus_lh)
assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# unlabeled vertices
labels_lh = [label for label in labels if label.name.endswith('lh')]
write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
subjects_dir=subjects_dir)
assert_equal(len(labels_lh), len(labels_reloaded))
label0 = labels_lh[0]
label1 = labels_reloaded[-1]
assert_equal(label1.name, "unknown-lh")
assert_true(np.all(np.in1d(label0.vertices, label1.vertices)))
# unnamed labels
labels4 = labels[:]
labels4[0].name = None
assert_raises(ValueError, write_labels_to_annot, labels4,
annot_fname=fnames[0])
@requires_sklearn
@testing.requires_testing_data
def test_split_label():
"""Test splitting labels"""
aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
regexp='lingual', subjects_dir=subjects_dir)
lingual = aparc[0]
# Test input error
assert_raises(ValueError, lingual.split, 'bad_input_string')
# split with names
parts = ('lingual_post', 'lingual_ant')
post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
# check output names
assert_equal(post.name, parts[0])
assert_equal(ant.name, parts[1])
# check vertices add up
lingual_reconst = post + ant
lingual_reconst.name = lingual.name
lingual_reconst.comment = lingual.comment
lingual_reconst.color = lingual.color
assert_labels_equal(lingual_reconst, lingual)
# compare output of Label.split() method
post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
assert_labels_equal(post1, post)
assert_labels_equal(ant1, ant)
# compare fs_like split with freesurfer split
antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
assert_array_equal(antmost.vertices, fs_vert)
# check default label name
assert_equal(antmost.name, "lingual_div40-lh")
# Apply contiguous splitting to DMN label from parcellation in Yeo, 2011
label_default_mode = read_label(op.join(subjects_dir, 'fsaverage', 'label',
'lh.7Networks_7.label'))
DMN_sublabels = label_default_mode.split(parts='contiguous',
subject='fsaverage',
subjects_dir=subjects_dir)
assert_equal([len(label.vertices) for label in DMN_sublabels],
[16181, 7022, 5965, 5300, 823] + [1] * 23)
@slow_test
@testing.requires_testing_data
@requires_sklearn
def test_stc_to_label():
"""Test stc_to_label
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src = read_source_spaces(fwd_fname)
src_bad = read_source_spaces(src_bad_fname)
stc = read_source_estimate(stc_fname, 'sample')
os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
labels1 = _stc_to_label(stc, src='sample', smooth=3)
labels2 = _stc_to_label(stc, src=src, smooth=3)
assert_equal(len(labels1), len(labels2))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
connected=True)
assert_true(len(w) > 0)
assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
connected=True)
assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
connected=True)
assert_equal(len(labels_lh), 1)
assert_equal(len(labels_rh), 1)
# test getting tris
tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
assert_raises(ValueError, spatial_tris_connectivity, tris,
remap_vertices=False)
connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
assert_true(connectivity.shape[0] == len(stc.vertices[0]))
# "src" as a subject name
assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
connected=False, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
smooth=False, connected=False, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
connected=True, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
connected=False, subjects_dir=subjects_dir)
labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
connected=False,
subjects_dir=subjects_dir)
assert_true(len(labels_lh) > 1)
assert_true(len(labels_rh) > 1)
# with smooth='patch'
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_patch = stc_to_label(stc, src=src, smooth=True)
assert_equal(len(w), 1)
assert_equal(len(labels_patch), len(labels1))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
@slow_test
@testing.requires_testing_data
def test_morph():
"""Test inter-subject label morphing
"""
label_orig = read_label(real_label_fname)
label_orig.subject = 'sample'
# should work for specifying vertices for both hemis, or just the
# hemi of the given label
vals = list()
for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
label = label_orig.copy()
# this should throw an error because the label has all zero values
assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
label.values.fill(1)
label = label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1)
label = label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2)
assert_true(np.in1d(label_orig.vertices, label.vertices).all())
assert_true(len(label.vertices) < 3 * len(label_orig.vertices))
vals.append(label.vertices)
assert_array_equal(vals[0], vals[1])
# make sure label smoothing can run
assert_equal(label.subject, 'sample')
verts = [np.arange(10242), np.arange(10242)]
for hemi in ['lh', 'rh']:
label.hemi = hemi
label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
assert_raises(TypeError, label.morph, None, 1, 5, verts,
subjects_dir, 2)
assert_raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
subjects_dir, 2)
with warnings.catch_warnings(record=True): # morph map could be missing
label.smooth(subjects_dir=subjects_dir) # make sure this runs
@testing.requires_testing_data
def test_grow_labels():
"""Test generation of circular source labels"""
seeds = [0, 50000]
# these were chosen manually in mne_analyze
should_be_in = [[49, 227], [51207, 48794]]
hemis = [0, 1]
names = ['aneurism', 'tumor']
labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
tgt_names = ['aneurism-lh', 'tumor-rh']
tgt_hemis = ['lh', 'rh']
for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
should_be_in, tgt_names):
assert_true(np.any(label.vertices == seed))
assert_true(np.all(np.in1d(sh, label.vertices)))
assert_equal(label.hemi, hemi)
assert_equal(label.name, name)
# grow labels with and without overlap
seeds = [57532, [58887, 6304]]
l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
seeds = [57532, [58887, 6304]]
l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False)
# test label naming
assert_equal(l01.name, 'Label_0-lh')
assert_equal(l02.name, 'Label_1-lh')
assert_equal(l11.name, 'Label_0-lh')
assert_equal(l12.name, 'Label_1-lh')
# make sure set 1 does not overlap
overlap = np.intersect1d(l11.vertices, l12.vertices, True)
assert_array_equal(overlap, [])
# make sure both sets cover the same vertices
l0 = l01 + l02
l1 = l11 + l12
assert_array_equal(l1.vertices, l0.vertices)
@testing.requires_testing_data
def test_label_sign_flip():
"""Test label sign flip computation"""
src = read_source_spaces(src_fname)
label = Label(vertices=src[0]['vertno'][:5], hemi='lh')
src[0]['nn'][label.vertices] = np.array(
[[1., 0., 0.],
[0., 1., 0.],
[0, 0, 1.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.]])
known_flips = np.array([1, 1, np.nan, 1, 1])
idx = [0, 1, 3, 4] # indices that are usable (third row is orthogonal)
flip = label_sign_flip(label, src)
# Need the abs here because the direction is arbitrary
assert_array_almost_equal(np.abs(np.dot(flip[idx], known_flips[idx])),
len(idx))
@testing.requires_testing_data
def test_label_center_of_mass():
"""Test computing the center of mass of a label"""
stc = read_source_estimate(stc_fname)
stc.lh_data[:] = 0
vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
assert_equal(vertex_stc, 124791)
label = Label(stc.vertices[1], pos=None, values=stc.rh_data.mean(axis=1),
hemi='rh', subject='sample')
vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
assert_equal(vertex_label, vertex_stc)
labels = read_labels_from_annot('sample', parc='aparc.a2009s',
subjects_dir=subjects_dir)
src = read_source_spaces(src_fname)
# Try a couple of random ones, one from left and one from right
# Visually verified in about the right place using mne_analyze
for label, expected in zip([labels[2], labels[3], labels[-5]],
[141162, 145221, 55979]):
label.values[:] = -1
assert_raises(ValueError, label.center_of_mass,
subjects_dir=subjects_dir)
label.values[:] = 1
assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=label.vertices),
expected)
# restrict to source space
idx = 0 if label.hemi == 'lh' else 1
# this simple nearest version is not equivalent, but is probably
# close enough for many labels (including the test ones):
pos = label.pos[np.where(label.vertices == expected)[0][0]]
pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
pos = np.argmin(np.sum(pos * pos, axis=1))
src_expected = src[idx]['vertno'][pos]
# see if we actually get the same one
src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src_restrict),
src_expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src),
src_expected)
# degenerate cases
assert_raises(ValueError, label.center_of_mass, subjects_dir=subjects_dir,
restrict_vertices='foo')
assert_raises(TypeError, label.center_of_mass, subjects_dir=subjects_dir,
surf=1)
assert_raises(IOError, label.center_of_mass, subjects_dir=subjects_dir,
surf='foo')
run_tests_if_main()
| bsd-3-clause |
thilbern/scikit-learn | sklearn/metrics/tests/test_common.py | 2 | 42318 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layouts.
#
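# A minimal sketch of how these dictionaries are consumed (the actual
# invariance tests below follow this pattern):
#
#     for name, metric in ALL_METRICS.items():
#         if name in METRIC_UNDEFINED_MULTICLASS:
#             continue
#         assert_almost_equal(metric(y_true, y_pred),
#                             metric(y_true_shuffle, y_pred_shuffle))
#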
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve", "hinge_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# XXX: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error",
]
# Classification metrics with "multilabel-indicator" and
# "multilabel-sequence" format support
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error"
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
def test_symmetry():
"""Test the symmetry of score and loss functions"""
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
def test_invariance_string_vs_numbers_labels():
"""Ensure that classification metrics with string labels"""
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss"):
measure_with_number = metric(y1, y2)
measure_with_str = metric(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
"""Non-regression test: scores should work with a single sample.
This is important for leave-one-out cross validation.
Score functions tested are those that formerly called np.squeeze,
which turns an array of size 1 into a 0-d array (!).
"""
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y1 = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y2 = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
# Be sure to have at least one empty label
y1 += ([], )
y2 += ([], )
# NOTE: The "sorted" trick is necessary to shuffle labels, because it
# allows to return the shuffled tuple.
rng = check_random_state(42)
shuffled = lambda x: sorted(x, key=lambda *args: rng.rand())
y1_shuffle = [shuffled(x) for x in y1]
y2_shuffle = [shuffled(x) for x in y2]
# Let's have redundant labels
y2_redundant = [x * rng.randint(1, 4) for x in y2]
# Binary indicator matrix format
lb = MultiLabelBinarizer().fit([range(n_classes)])
y1_binary_indicator = lb.transform(y1)
y2_binary_indicator = lb.transform(y2)
y1_sparse_indicator = sp.coo_matrix(y1_binary_indicator)
y2_sparse_indicator = sp.coo_matrix(y2_binary_indicator)
y1_shuffle_binary_indicator = lb.transform(y1_shuffle)
y2_shuffle_binary_indicator = lb.transform(y2_shuffle)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1_binary_indicator, y2_binary_indicator)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
# Check shuffling invariance with dense binary indicator matrix
assert_almost_equal(metric(y1_shuffle_binary_indicator,
y2_shuffle_binary_indicator), measure,
err_msg="%s failed shuffling invariance "
" with dense binary indicator format."
% name)
# Check deprecation warnings related to sequence of sequences
deprecated_metric = partial(assert_warns, DeprecationWarning, metric)
# Check representation invariance
assert_almost_equal(deprecated_metric(y1, y2),
measure,
err_msg="%s failed representation invariance "
"between list of list of labels "
"format and dense binary indicator "
"format." % name)
# Check invariance with redundant labels with list of labels
assert_almost_equal(deprecated_metric(y1, y2_redundant), measure,
err_msg="%s failed rendundant label invariance"
% name)
# Check shuffling invariance with list of labels
assert_almost_equal(deprecated_metric(y1_shuffle, y2_shuffle), measure,
err_msg="%s failed shuffling invariance "
"with list of list of labels format."
% name)
# Check raises error with mix input representation
assert_raises(ValueError, deprecated_metric, y1, y2_binary_indicator)
assert_raises(ValueError, deprecated_metric, y1_binary_indicator, y2)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y_true = make_ml(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples)
_, y_pred = make_ml(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples)
# Be sure to have at least one empty label
y_true += ([], )
y_pred += ([], )
n_samples += 1
lb = MultiLabelBinarizer().fit([range(n_classes)])
y_true_binary_indicator = lb.transform(y_true)
y_pred_binary_indicator = lb.transform(y_pred)
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
# List of list of labels
measure = assert_warns(DeprecationWarning, metrics, y_true, y_pred,
normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(ignore_warnings(metrics)(y_true, y_pred,
normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
# Indicator matrix format
measure = metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
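def _demo_averaging_identities():
    # Illustrative sketch only, not collected as a test: two identities that
    # _check_averaging verifies, written out for f1_score on a small
    # multilabel indicator problem -- "macro" is the unweighted mean of the
    # per-label scores and "micro" equals the binary score computed on the
    # raveled indicator matrices.
    from sklearn.metrics import f1_score
    y_true = np.array([[1, 0, 1], [0, 1, 1], [1, 1, 0]])
    y_pred = np.array([[1, 0, 1], [0, 1, 0], [0, 1, 0]])
    per_label = f1_score(y_true, y_pred, average=None)
    assert_almost_equal(f1_score(y_true, y_pred, average="macro"),
                        np.mean(per_label))
    assert_almost_equal(f1_score(y_true, y_pred, average="micro"),
                        f1_score(y_true.ravel(), y_pred.ravel()))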
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
return_indicator=True,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that if sample_weight.shape[0] != y_true.shape[0], it raised an
# error
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
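def _demo_sample_weight_semantics():
    # Illustrative sketch only, not collected as a test: two of the weight
    # identities exercised above, written out for accuracy_score -- unit
    # weights behave like no weights, and an integer weight of k behaves like
    # repeating that sample k times.
    from sklearn.metrics import accuracy_score
    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0])
    weights = np.array([1, 2, 3, 1])
    assert_almost_equal(
        accuracy_score(y_true, y_pred),
        accuracy_score(y_true, y_pred, sample_weight=np.ones(len(y_true))))
    assert_almost_equal(
        accuracy_score(y_true, y_pred, sample_weight=weights),
        accuracy_score(np.repeat(y_true, weights), np.repeat(y_pred, weights)))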
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel sequence
y_true = 2 * [(1, 2, ), (1, ), (0, ), (0, 1), (1, 2)]
y_pred = 2 * [(0, 2, ), (2, ), (0, ), (2, ), (1,)]
y_score = random_state.randn(10, 3)
for name in MULTILABELS_METRICS:
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
# multilabel indicator
_, ya = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=0, n_samples=100,
return_indicator=True, allow_unlabeled=False)
_, yb = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=1, n_samples=100,
return_indicator=True, allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
metpy/MetPy | examples/meteogram_metpy.py | 6 | 8767 | # Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Meteogram
=========
Plots time series data as a meteogram.
"""
import datetime as dt
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import dewpoint_rh
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
from metpy.units import units
def calc_mslp(t, p, h):
return p * (1 - (0.0065 * h) / (t + 0.0065 * h + 273.15)) ** (-5.257)
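# Rough sanity check for calc_mslp (illustrative numbers, not from the test
# data): a station at h = 292 m reporting t = 20 degC and p = 980 hPa gives
# 980 * (1 - (0.0065 * 292) / (20 + 0.0065 * 292 + 273.15)) ** (-5.257)
# ~= 980 * (1 - 1.898 / 295.05) ** (-5.257) ~= 1014 hPa reduced to sea level.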
# Make meteogram plot
class Meteogram(object):
""" Plot a time series of meteorological data from a particular station as a
meteogram with standard variables to visualize, including thermodynamic,
    kinematic, and pressure variables. The functions below control the plotting of each
variable.
TO DO: Make the subplot creation dynamic so the number of rows is not
static as it is currently. """
def __init__(self, fig, dates, probeid, time=None, axis=0):
"""
Required input:
fig: figure object
dates: array of dates corresponding to the data
probeid: ID of the station
Optional Input:
time: Time the data is to be plotted
axis: number that controls the new axis to be plotted (FOR FUTURE)
"""
if not time:
time = dt.datetime.utcnow()
self.start = dates[0]
self.fig = fig
self.end = dates[-1]
self.axis_num = 0
self.dates = mpl.dates.date2num(dates)
self.time = time.strftime('%Y-%m-%d %H:%M UTC')
self.title = 'Latest Ob Time: {0}\nProbe ID: {1}'.format(self.time, probeid)
def plot_winds(self, ws, wd, wsmax, plot_range=None):
"""
Required input:
ws: Wind speeds (knots)
wd: Wind direction (degrees)
wsmax: Wind gust (knots)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT WIND SPEED AND WIND DIRECTION
        self.ax1 = self.fig.add_subplot(4, 1, 1)
ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
self.ax1.fill_between(self.dates, ws, 0)
self.ax1.set_xlim(self.start, self.end)
if not plot_range:
plot_range = [0, 20, 1]
self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center')
self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max')
ax7 = self.ax1.twinx()
ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction')
ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center')
ax7.set_ylim(0, 360)
        ax7.set_yticks(np.arange(45, 405, 90))
        ax7.set_yticklabels(['NE', 'SE', 'SW', 'NW'])
lns = ln1 + ln2 + ln3
labs = [l.get_label() for l in lns]
ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
ax7.legend(lns, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})
def plot_thermo(self, t, td, plot_range=None):
"""
Required input:
T: Temperature (deg F)
TD: Dewpoint (deg F)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT TEMPERATURE AND DEWPOINT
if not plot_range:
plot_range = [10, 90, 2]
        self.ax2 = self.fig.add_subplot(4, 1, 2, sharex=self.ax1)
ln4 = self.ax2.plot(self.dates, t, 'r-', label='Temperature')
self.ax2.fill_between(self.dates, t, td, color='r')
self.ax2.set_ylabel('Temperature\n(F)', multialignment='center')
self.ax2.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2])
ln5 = self.ax2.plot(self.dates, td, 'g-', label='Dewpoint')
self.ax2.fill_between(self.dates, td, self.ax2.get_ylim()[0], color='g')
ax_twin = self.ax2.twinx()
ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
lns = ln4 + ln5
labs = [l.get_label() for l in lns]
ax_twin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax2.legend(lns, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12})
def plot_rh(self, rh, plot_range=None):
"""
Required input:
RH: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT RELATIVE HUMIDITY
if not plot_range:
plot_range = [0, 100, 4]
        self.ax3 = self.fig.add_subplot(4, 1, 3, sharex=self.ax1)
self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity')
self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g')
self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center')
self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
axtwin = self.ax3.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
def plot_pressure(self, p, plot_range=None):
"""
Required input:
P: Mean Sea Level Pressure (hPa)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT PRESSURE
if not plot_range:
plot_range = [970, 1030, 2]
        self.ax4 = self.fig.add_subplot(4, 1, 4, sharex=self.ax1)
self.ax4.plot(self.dates, p, 'm', label='Mean Sea Level Pressure')
self.ax4.set_ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center')
self.ax4.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin = self.ax4.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin.fill_between(self.dates, p, axtwin.get_ylim()[0], color='m')
axtwin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12})
self.ax4.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
# OTHER OPTIONAL AXES TO PLOT
# plot_irradiance
# plot_precipitation
# set the starttime and endtime for plotting, 24 hour range
endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0)
starttime = endtime - dt.timedelta(hours=24)
# Height of the station to calculate MSLP
hgt_example = 292.
# Parse dates from .csv file, knowing their format as a string and convert to datetime
def parse_date(date):
return dt.datetime.strptime(date.decode('ascii'), '%Y-%m-%d %H:%M:%S')
testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None,
usecols=list(range(1, 8)),
converters={'DATE': parse_date}, delimiter=',')
# Temporary variables for ease
temp = testdata['T']
pres = testdata['P']
rh = testdata['RH']
ws = testdata['WS']
wsmax = testdata['WSMAX']
wd = testdata['WD']
date = testdata['DATE']
# ID For Plotting on Meteogram
probe_id = '0102A'
data = {'wind_speed': (np.array(ws) * units('m/s')).to(units('knots')),
'wind_speed_max': (np.array(wsmax) * units('m/s')).to(units('knots')),
'wind_direction': np.array(wd) * units('degrees'),
'dewpoint': dewpoint_rh((np.array(temp) * units('degC')).to(units('K')),
np.array(rh) / 100.).to(units('degF')),
'air_temperature': (np.array(temp) * units('degC')).to(units('degF')),
'mean_slp': calc_mslp(np.array(temp), np.array(pres), hgt_example) * units('hPa'),
'relative_humidity': np.array(rh), 'times': np.array(date)}
fig = plt.figure(figsize=(20, 16))
add_metpy_logo(fig, 250, 180)
meteogram = Meteogram(fig, data['times'], probe_id)
meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max'])
meteogram.plot_thermo(data['air_temperature'], data['dewpoint'])
meteogram.plot_rh(data['relative_humidity'])
meteogram.plot_pressure(data['mean_slp'])
fig.subplots_adjust(hspace=0.5)
plt.show()
| bsd-3-clause |
WillieMaddox/numpy | numpy/core/function_base.py | 7 | 6565 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
if num > 1:
delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y *= delta
else:
y *= step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| bsd-3-clause |
totalgood/nlpia | src/nlpia/mavis_greetings.py | 1 | 1246 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants and discovered values, like path to current installation of pug-nlp."""
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str, # noqa
ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
standard_library.install_aliases() # noqa: Counter, OrderedDict,
import os
import pandas as pd
from pugnlp.constants import DATA_PATH
if __name__ == '__main__':
df = pd.DataFrame()
for is_greeting, filename in enumerate(['mavis-batey-sentences.txt', 'mavis-batey-greetings.txt']):
with open(os.path.join(DATA_PATH, filename)) as f:
df = pd.concat([df, pd.DataFrame([[sentence.strip(), is_greeting] for sentence in f],
columns=['sentence', 'is_greeting'])],
ignore_index=True)
df.to_csv(os.path.join(DATA_PATH, 'mavis-greeting-training-set.csv'))
# df = pd.DataFrame.from_csv(
# 'https://raw.githubusercontent.com/totalgood/pugnlp/master/pugnlp/data/mavis-greeting-training-set.csv',
# header=0)
| mit |
DSLituiev/scikit-learn | sklearn/neighbors/graph.py | 14 | 6609 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/examples/tsa/ex_arma_all.py | 34 | 1982 |
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.descriptivestats import TsaDescriptive
from statsmodels.tsa.arma_mle import Arma
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)
d = TsaDescriptive(x)
d.plot4()
#d.fit(order=(1,1))
d.fit((1,1), trend='nc')
print(d.res.params)
modc = Arma(x)
resls = modc.fit(order=(1,1))
print(resls[0])
rescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])
print(rescm.params)
#decimal 1 corresponds to threshold of 5% difference
assert_almost_equal(resls[0] / d.res.params, 1, decimal=1)
assert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)
#copied to tsa.tests
plt.figure()
plt.plot(x, 'b-o')
plt.plot(modc.predicted(), 'r-')
plt.figure()
plt.plot(modc.error_estimate)
#plt.show()
from statsmodels.miscmodels.tmodel import TArma
modct = TArma(x)
reslst = modc.fit(order=(1,1))
print(reslst[0])
rescmt = modct.fit_mle(order=(1,1), start_params=[-0.4,0.4, 10, 1.],maxiter=500,
maxfun=500)
print(rescmt.params)
from statsmodels.tsa.arima_model import ARMA
mkf = ARMA(x)
##rkf = mkf.fit((1,1))
##rkf.params
rkf = mkf.fit((1,1), trend='nc')
print(rkf.params)
from statsmodels.tsa.arima_process import arma_generate_sample
np.random.seed(12345)
y_arma22 = arma_generate_sample([1.,-.85,.35, -0.1],[1,.25,-.7], nsample=1000)
##arma22 = ARMA(y_arma22)
##res22 = arma22.fit(trend = 'nc', order=(2,2))
##print 'kf ',res22.params
##res22css = arma22.fit(method='css',trend = 'nc', order=(2,2))
##print 'css', res22css.params
mod22 = Arma(y_arma22)
resls22 = mod22.fit(order=(2,2))
print('ls ', resls22[0])
resmle22 = mod22.fit_mle(order=(2,2), maxfun=2000)
print('mle', resmle22.params)
f = mod22.forecast()
f3 = mod22.forecast3(start=900)[-20:]
print(y_arma22[-10:])
print(f[-20:])
print(f3[-109:-90])
plt.show() | bsd-3-clause |
abhijeetmote/python_stuff | to_csv.py | 1 | 1381 | import xml.etree.ElementTree as ET
import os
import sys
import fnmatch
import csv
import pdb
import glob
import tempfile
import shutil
import gzip
import datetime
import tarfile
import time
import pandas as pd
file_name = "/home/abhijeet/test/file.xml"
output = "/home/abhijeet/test/file.csv"
# Handle unparsable XML files early: everything below needs a valid tree
try:
    tree = ET.parse(file_name)
except Exception as e:
    raise SystemExit("Could not parse %s: %s" % (file_name, e))
ad_sc_data = None
csv_dict = {}
root_tag = tree.getroot()
input_file = open(output, "wb")
# Field names must match the dictionary keys written below; extra keys coming
# from the common session chunk are ignored instead of raising a ValueError.
csv_header = ['external_channel_ref', 'utc_consume_start_epoch',
              'utc_consume_stop_epoch', 'timeshift', 'channel_audio_language']
dict_writer = csv.DictWriter(input_file, delimiter='|', fieldnames=csv_header,
                             extrasaction='ignore')
all_channel_tags = root_tag.getchildren()[1:]
count = 0
for ch_tags in all_channel_tags:
count += 1
print(count)
csv_dict["external_channel_ref"] = ch_tags.getchildren()[0].text
csv_dict["utc_consume_start_epoch"] = ch_tags.getchildren()[1].text
csv_dict["utc_consume_stop_epoch"] = ch_tags.getchildren()[2].text
csv_dict["timeshift"] = ch_tags.getchildren()[3].text
csv_dict["channel_audio_language"] = ch_tags.getchildren()[4].text
try:
common_session_chunk = ch_tags.getchildren()[5].getchildren()
for tag in common_session_chunk:
csv_dict[tag.tag] = tag.text
except:
pass
dict_writer.writerow(csv_dict)
    print(csv_dict)
input_file.close()
| gpl-3.0 |
hitszxp/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
    R : None, 'svd' or numpy.random.RandomState, optional
        initialization: NNDSVD when 'svd', otherwise a random non-negative
        start (seeded by the given RandomState, or the global one when None)
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
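def example_alt_nnmf(random_state=0):
    # Illustrative usage sketch, not part of the benchmark runs below: factor
    # a random non-negative matrix and return the Frobenius reconstruction
    # error. Passing R="svd" starts from the NNDSVD initialization computed by
    # _initialize_nmf; any other value falls back to a random start.
    rng = np.random.RandomState(random_state)
    V = np.abs(rng.standard_normal((30, 20)))
    W, H = alt_nnmf(V, r=5, tol=1e-3, R="svd")
    return norm(V - np.dot(W, H))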
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
gergopokol/renate-od | visualization/profiles.py | 1 | 9125 | import matplotlib.pyplot
import utility
from matplotlib.backends.backend_pdf import PdfPages
import datetime
from crm_solver.atomic_db import RenateDB
class BeamletProfiles:
def __init__(self, param_path='output/beamlet/beamlet_test.xml', key=['profiles']):
self.param_path = param_path
self.param = utility.getdata.GetData(data_path_name=self.param_path).data
self.access_path = self.param.getroot().find('body').find('beamlet_profiles').text
self.key = key
self.components = utility.getdata.GetData(data_path_name=self.access_path, data_key=self.key)
self.profiles = utility.getdata.GetData(data_path_name=self.access_path, data_key=self.key).data
self.atomic_db = RenateDB(self.param, 'default', self.access_path)
self.title = None
def set_x_range(self, x_min=None, x_max=None):
self.x_limits = [x_min, x_max]
    def plot_RENATE_benchmark(self, plot_type='population'):
fig1 = matplotlib.pyplot.figure()
grid = matplotlib.pyplot.GridSpec(3, 1)
ax1 = matplotlib.pyplot.subplot(grid[0, 0])
ax1 = self.__setup_density_axis(ax1)
ax2 = ax1.twinx()
self.__setup_temperature_axis(ax2)
self.title = 'Plasma profiles'
ax1.set_title(self.title)
self.__setup_RENATE_benchmark_axis(matplotlib.pyplot.subplot(grid[1:, 0]), plot_type)
matplotlib.pyplot.show()
def __setup_RENATE_benchmark_axis(self, axis, plot_type):
max_val = self.profiles['level ' + self.atomic_db.inv_atomic_dict[0]][0]
for level in self.atomic_db.atomic_dict.keys():
            if plot_type == 'population':
axis.plot(self.profiles['beamlet grid'], self.profiles['RENATE level ' +
str(self.atomic_db.atomic_dict[level])], '-', label='RENATE '+level)
axis.plot(self.profiles['beamlet grid'], self.profiles['level '+level]/max_val,
'--', label='ROD '+level)
axis.set_ylabel('Relative electron population [-]')
axis.set_yscale('log', nonposy='clip')
            elif plot_type == 'error':
axis.set_ylabel('Relative error [-]')
axis.plot(self.profiles['beamlet grid'], abs(self.profiles['level '+level]/max_val -
self.profiles['RENATE level ' + str(self.atomic_db.atomic_dict[level])]) /
(self.profiles['level '+level]/max_val), '--', label='Level '+level)
else:
                raise ValueError('Plot type ' + plot_type + ' not implemented!')
if hasattr(self, 'x_limits'):
axis.set_xlim(self.x_limits)
axis.legend(loc='best', ncol=1)
self.title = 'Benchmark: RENATE - ROD'
axis.set_title(self.title)
axis.grid()
return axis
def plot_linear_emission_density(self, from_level=None, to_level=None):
axis_dens = matplotlib.pyplot.subplot()
self.__setup_density_axis(axis_dens)
axis_dens.set_xlabel('Distance [m]')
axis_em = axis_dens.twinx()
if from_level is None or to_level is None or not isinstance(from_level, str) or not isinstance(to_level, str):
from_level, to_level, ground_level, transition = self.atomic_db.set_default_atomic_levels()
else:
transition = from_level + '-' + to_level
self.__setup_linear_emission_density_axis(axis_em, transition)
matplotlib.pyplot.show()
def __setup_linear_emission_density_axis(self, axis, transition):
try:
axis.plot(self.profiles['beamlet grid'], self.profiles[transition],
label='Emission for '+transition, color='r')
except KeyError:
raise Exception('The requested transition: <'+transition+'> is not in the stored data. '
'Try computing it first or please make sure it exists')
axis.set_ylabel('Linear emission density [ph/sm]')
axis.yaxis.label.set_color('r')
axis.legend(loc='upper right')
return axis
def plot_attenuation(self):
axis_dens = matplotlib.pyplot.subplot()
self.__setup_density_axis(axis_dens)
axis_dens.set_xlabel('Distance [m]')
axis_em = axis_dens.twinx()
self.__setup_linear_density_attenuation_axis(axis_em)
matplotlib.pyplot.show()
def __setup_linear_density_attenuation_axis(self, axis):
axis.plot(self.profiles['beamlet grid'], self.profiles['linear_density_attenuation'],
label='Linear density attenuation', color='r')
axis.set_ylabel('Linear density [1/m]')
axis.yaxis.label.set_color('r')
axis.legend(loc='upper right')
return axis
def plot_relative_populations(self):
axis = matplotlib.pyplot.subplot()
self.__setup_population_axis(axis, kind='relative')
matplotlib.pyplot.show()
def plot_populations(self):
axis = matplotlib.pyplot.subplot()
self.__setup_population_axis(axis)
matplotlib.pyplot.show()
def plot_all_profiles(self):
fig1 = matplotlib.pyplot.figure()
grid = matplotlib.pyplot.GridSpec(3, 1)
ax1 = matplotlib.pyplot.subplot(grid[0, 0])
ax1 = self.__setup_density_axis(ax1)
ax2 = ax1.twinx()
self.__setup_temperature_axis(ax2)
self.title = 'Plasma profiles'
ax1.set_title(self.title)
ax3 = matplotlib.pyplot.subplot(grid[1:, 0])
self.__setup_population_axis(ax3)
fig1.tight_layout()
matplotlib.pyplot.show()
def benchmark(self, benchmark_param_path='../data/beamlet/IMAS_beamlet_test_profiles_Li.xml', key=['profiles']):
benchmark_param = utility.getdata.GetData(data_path_name=benchmark_param_path).data
benchmark_path = benchmark_param.getroot().find('body').find('beamlet_profiles').text
benchmark_profiles = utility.getdata.GetData(data_path_name=benchmark_path, data_key=key).data
fig1 = matplotlib.pyplot.figure()
ax1 = matplotlib.pyplot.subplot()
ax1 = self.__setup_population_axis(ax1)
ax1 = self.setup_benchmark_axis(benchmark_profiles, axis=ax1)
ax1.legend(loc='best', ncol=2)
self.title = 'Beamlet profiles - benchmark'
ax1.set_title(self.title)
ax1.grid()
fig1.tight_layout()
matplotlib.pyplot.show()
def __setup_density_axis(self, axis):
axis.plot(self.profiles['beamlet grid'], self.profiles['electron']
['density']['m-3'], label='Density', color='b')
if hasattr(self, 'x_limits'):
axis.set_xlim(self.x_limits)
axis.set_ylabel('Density [1/m3]')
axis.yaxis.label.set_color('b')
axis.legend(loc='upper left')
axis.grid()
return axis
def __setup_temperature_axis(self, axis):
axis.plot(self.profiles['beamlet grid'], self.profiles['electron']['temperature']['eV'], color='r',
label='Electron_temperature')
axis.plot(self.profiles['beamlet grid'], self.profiles['ion1']['temperature']['eV'], '--', label='Ion_temperature',
color='m')
axis.set_ylabel('Temperature [eV]')
axis.yaxis.label.set_color('r')
axis.legend(loc='lower right')
axis.grid()
return axis
def __setup_population_axis(self, axis, kind='absolute'):
pandas_key, axis_name = self.set_axis_parameters(kind)
for level in range(self.atomic_db.atomic_levels):
label = pandas_key + self.atomic_db.inv_atomic_dict[level]
axis.plot(self.profiles['beamlet grid'], self.profiles[label], label=label)
if hasattr(self, 'x_limits'):
axis.set_xlim(self.x_limits)
axis.set_yscale('log', nonposy='clip')
axis.set_xlabel('Distance [m]')
axis.set_ylabel(axis_name)
axis.legend(loc='best', ncol=1)
self.title = 'Beamlet profiles'
axis.set_title(self.title)
axis.grid()
return axis
@staticmethod
def set_axis_parameters(kind):
assert isinstance(kind, str)
if kind == 'absolute':
return 'level ', 'Linear density [1/m]'
elif kind == 'relative':
return 'rel.pop ', 'Relative linear density [-]'
else:
raise ValueError('Requested plotting format not accepted')
def setup_benchmark_axis(self, benchmark_profiles, axis):
for level in range(self.atomic_db.atomic_levels):
label = 'level ' + str(level)
axis.plot(benchmark_profiles['beamlet grid'], benchmark_profiles[label], '--', label=label+' ref.')
return axis
def save_figure(self, file_path='data/output/beamlet/test_plot.pdf'):
with PdfPages(file_path) as pdf:
pdf.savefig()
d = pdf.infodict()
d['Title'] = self.title
d['Keywords'] = 'Source hdf5 file: ' + self.access_path + ', source xml file: ' + self.param_path
d['ModDate'] = datetime.datetime.today()
| lgpl-3.0 |
anve8004/trading-with-python | lib/cboe.py | 76 | 4433 | # -*- coding: utf-8 -*-
"""
toolset working with cboe data
@author: Jev Kuznetsov
Licence: BSD
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index
from pandas.core import datetools
import numpy as np
import pandas as pd
def monthCode(month):
"""
perform month->code and back conversion
Input: either month nr (int) or month code (str)
Returns: code or month nr
"""
codes = ('F','G','H','J','K','M','N','Q','U','V','X','Z')
if isinstance(month,int):
return codes[month-1]
elif isinstance(month,str):
return codes.index(month)+1
else:
raise ValueError('Function accepts int or str')
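# Example (illustrative): monthCode(11) returns 'X' (the November code) and
# monthCode('X') returns 11, so the two directions round-trip.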
def vixExpiration(year,month):
"""
    expiration date of a VX future: 30 days before the third Friday
    of the following month
"""
t = datetime(year,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
    if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_exp = t_new-datetools.relativedelta(days=30)
return t_exp
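# Worked example (illustrative): for the November 2011 contract the third
# Friday of the following month is 2011-12-16, so the future expires 30 days
# earlier, on Wednesday 2011-11-16.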
def getPutCallRatio():
""" download current Put/Call ratio"""
urlStr = 'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv'
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
headerLine = 2
header = lines[headerLine].strip().split(',')
data = [[] for i in range(len(header))]
for line in lines[(headerLine+1):]:
fields = line.rstrip().split(',')
data[0].append(datetime.strptime(fields[0],'%m/%d/%Y'))
for i,field in enumerate(fields[1:]):
data[i+1].append(float(field))
return DataFrame(dict(zip(header[1:],data[1:])), index = Index(data[0]))
def getHistoricData(symbols = ['VIX','VXV','VXMT','VVIX']):
''' get historic data from CBOE
return dataframe
'''
if not isinstance(symbols,list):
symbols = [symbols]
urls = {'VIX':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vixcurrent.csv',
'VXV':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxvdailyprices.csv',
'VXMT':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vxmtdailyprices.csv',
'VVIX':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/VVIXtimeseries.csv'}
startLines = {'VIX':1,'VXV':2,'VXMT':2,'VVIX':1}
cols = {'VIX':'VIX Close','VXV':'CLOSE','VXMT':'Close','VVIX':'VVIX'}
data = {}
for symbol in symbols:
urlStr = urls[symbol]
print 'Downloading %s from %s' % (symbol,urlStr)
data[symbol] = pd.read_csv(urllib2.urlopen(urlStr), header=startLines[symbol],index_col=0,parse_dates=True)[cols[symbol]]
return pd.DataFrame(data)
#---------------------classes--------------------------------------------
class VixFuture(object):
"""
Class for easy handling of futures data.
"""
def __init__(self,year,month):
self.year = year
self.month = month
def expirationDate(self):
return vixExpiration(self.year,self.month)
def daysLeft(self,date):
""" business days to expiration date """
from pandas import DateRange # this will cause a problem with pandas 0.14 and higher... Method is depreciated and replaced by DatetimeIndex
r = DateRange(date,self.expirationDate())
return len(r)
def __repr__(self):
return 'VX future [%i-%i %s] Exprires: %s' % (self.year,self.month,monthCode(self.month),
self.expirationDate())
#-------------------test functions---------------------------------------
def testDownload():
vix = getHistoricData('VIX')
vxv = getHistoricData('VXV')
vix.plot()
vxv.plot()
def testExpiration():
for month in xrange(1,13):
d = vixExpiration(2011,month)
print d.strftime("%B, %d %Y (%A)")
if __name__ == '__main__':
#testExpiration()
v = VixFuture(2011,11)
print v
print v.daysLeft(datetime(2011,11,10))
| bsd-3-clause |
fberanizo/sin5006 | tests/optimization/utils.py | 1 | 2030 | # -*- coding: utf-8 -*-
import numpy, matplotlib.pyplot, pandas, seaborn
def plot(execution_info, title='', description=''):
for generation_info in execution_info:
x = numpy.arange(1, len(generation_info)+1)
max = numpy.asarray(map(lambda individual: individual["max"], generation_info))
avg = numpy.asarray(map(lambda individual: individual["avg"], generation_info))
std = numpy.asarray(map(lambda individual: individual["std"], generation_info))
matplotlib.pyplot.plot(x, max, "r", label="melhor", linewidth=1)
matplotlib.pyplot.plot(x, avg, "b", label="media", linewidth=1)
matplotlib.pyplot.plot(x, std, "k.", label="desvio")
matplotlib.pyplot.xlabel('generations')
matplotlib.pyplot.ylabel('fitness')
#legend = matplotlib.pyplot.legend(loc='lower right')
matplotlib.pyplot.title(title)
matplotlib.pyplot.figtext(.02, .02, description)
matplotlib.pyplot.gca().set_position((.1, .3, .8, .6))
matplotlib.pyplot.show()
def save_scores(filepath, grid_scores):
f = open(filepath, "w")
f.write(",".join(["Population", "Operators", "Fitness", "FitnessStdDev"]) + "\n")
for score in grid_scores:
mean_best_fitness = "{:.6f}".format(score["mean_best_fitness"])
std_best_fitness = "{:.6f}".format(score["std_best_fitness"])
population_size = str(score["params"]["population_size"])
reproduction = "{:.3f}".format(score["params"]["operators_rate"][0])
crossover = "{:.3f}".format(score["params"]["operators_rate"][1])
mutation = "{:.3f}".format(score["params"]["operators_rate"][2])
fields = [population_size, "\"R: "+reproduction+", C: "+crossover +", M: "+mutation+"\"", mean_best_fitness, std_best_fitness]
f.write(",".join(fields) + "\n")
f.close()
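# Usage sketch (not in the original module): the CSV written by save_scores can
# be pivoted into a Population x Operators grid suitable for the plot_heatmap
# helper defined below, e.g.
# scores = pandas.read_csv("scores.csv")
# grid = scores.pivot(index="Population", columns="Operators", values="Fitness")
# plot_heatmap("heatmap.png", grid)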
def plot_heatmap(filepath, dataset):
seaborn.set()
h = seaborn.heatmap(dataset, annot=True, linewidths=.5)
matplotlib.pyplot.yticks(rotation=0)
matplotlib.pyplot.show()
#matplotlib.pyplot.savefig(filepath) | bsd-2-clause |
MrNuggelz/sklearn-glvq | sklearn_lvq/tests/test_glvq.py | 1 | 10494 | import numpy as np
from .. import GlvqModel
from .. import GrlvqModel
from .. import GmlvqModel
from .. import GrmlvqModel
from .. import LgmlvqModel
from sklearn.utils.testing import assert_greater, assert_raise_message, \
assert_allclose
from sklearn import datasets
from sklearn.utils import check_random_state
from sklearn.utils.estimator_checks import check_estimator
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
score = 0.9
def test_glvq_iris():
check_estimator(GlvqModel)
c = [(0, 1, 0.9), (1, 0, 1.1)]
model = GlvqModel(prototypes_per_class=2, C=c)
model.fit(iris.data, iris.target)
assert_greater(model.score(iris.data, iris.target), score)
assert_raise_message(ValueError, 'display must be a boolean',
GlvqModel(display='true').fit, iris.data, iris.target)
assert_raise_message(ValueError, 'gtol must be a positive float',
GlvqModel(gtol=-1.0).fit, iris.data, iris.target)
assert_raise_message(ValueError, 'the initial prototypes have wrong shape',
GlvqModel(initial_prototypes=[[1, 1], [2, 2]]).fit,
iris.data, iris.target)
assert_raise_message(ValueError,
'prototype labels and test data classes do not match',
GlvqModel(initial_prototypes=[[1, 1, 1, 1, 'a'],
[2, 2, 2, 2, 5],
[2, 2, 2, 2, -3]]).fit,
iris.data, iris.target)
assert_raise_message(ValueError, 'max_iter must be an positive integer',
GlvqModel(max_iter='5').fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'max_iter must be an positive integer',
GlvqModel(max_iter=0).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'max_iter must be an positive integer',
GlvqModel(max_iter=-1).fit, iris.data,
iris.target)
assert_raise_message(ValueError,
'values in prototypes_per_class must be positive',
GlvqModel(prototypes_per_class=np.zeros(
np.unique(iris.target).size) - 1).fit, iris.data,
iris.target)
assert_raise_message(ValueError,
'length of prototypes per class'
' does not fit the number of',
GlvqModel(prototypes_per_class=[1, 2]).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'X has wrong number of features',
model.predict, [[1, 2], [3, 4]])
def test_grlvq_iris():
check_estimator(GrlvqModel)
c = [(0, 1, 0.9), (1, 0, 1.1)]
model = GrlvqModel(prototypes_per_class=2, C=c, regularization=0.5)
model.fit(iris.data, iris.target)
assert_greater(model.score(iris.data, iris.target), score)
model = GrlvqModel(initial_prototypes=[[0, 0, 0], [4, 4, 1]])
nb_ppc = 10
x = np.append(
np.random.multivariate_normal([0, 0], np.array([[0.3, 0], [0, 4]]),
size=nb_ppc),
np.random.multivariate_normal([4, 4], np.array([[0.3, 0], [0, 4]]),
size=nb_ppc), axis=0)
y = np.append(np.zeros(nb_ppc), np.ones(nb_ppc), axis=0)
model.fit(x, y)
assert_allclose(np.array([1.0, 0.0]), model.lambda_, atol=0.2)
assert_raise_message(ValueError, 'length of initial relevances is wrong',
GrlvqModel(initial_relevances=[1, 2]).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'regularization must be a positive float',
GrlvqModel(regularization=-1.0).fit, iris.data,
iris.target)
GrlvqModel(prototypes_per_class=2).fit(
iris.data, iris.target)
def test_gmlvq_iris():
check_estimator(GmlvqModel)
c = [(0, 1, 0.9), (1, 0, 1.1)]
model = GmlvqModel(prototypes_per_class=2, C=c, regularization=0.5)
model.fit(iris.data, iris.target)
assert_greater(model.score(iris.data, iris.target), score)
model = GmlvqModel(initial_prototypes=[[0, 0, 0], [4, 4, 1]])
nb_ppc = 10
x = np.append(
np.random.multivariate_normal([0, 0], np.array([[0.3, 0], [0, 4]]),
size=nb_ppc),
np.random.multivariate_normal([4, 4], np.array([[0.3, 0], [0, 4]]),
size=nb_ppc), axis=0)
y = np.append(np.zeros(nb_ppc), np.ones(nb_ppc), axis=0)
model.fit(x, y)
assert_allclose(np.array([[1, 0], [0.2, 0]]), model.omega_, atol=0.3)
assert_raise_message(ValueError, 'regularization must be a positive float',
GmlvqModel(regularization=-1.0).fit, iris.data,
iris.target)
assert_raise_message(ValueError,
'initial matrix has wrong number of features',
GmlvqModel(
initial_matrix=[[1, 2], [3, 4], [5, 6]]).fit,
iris.data, iris.target)
assert_raise_message(ValueError, 'dim must be an positive int',
GmlvqModel(dim=0).fit, iris.data, iris.target)
GmlvqModel(dim=1, prototypes_per_class=2).fit(
iris.data, iris.target)
def test_grmlvq_iris():
check_estimator(GrmlvqModel)
c = [(0, 1, 0.9), (1, 0, 1.1)]
model = GrmlvqModel(prototypes_per_class=2, C=c, regularization=0.5)
model.fit(iris.data, iris.target)
assert_greater(model.score(iris.data, iris.target), score)
model = GrmlvqModel(initial_prototypes=[[0, 0, 0], [4, 4, 1]])
nb_ppc = 10
x = np.append(
np.random.multivariate_normal([0, 0], np.array([[0.3, 0], [0, 4]]),
size=nb_ppc),
np.random.multivariate_normal([4, 4], np.array([[0.3, 0], [0, 4]]),
size=nb_ppc), axis=0)
y = np.append(np.zeros(nb_ppc), np.ones(nb_ppc), axis=0)
model.fit(x, y)
assert_allclose(np.array([0.9, 0.1]), model.lambda_, atol=0.3)
#assert_allclose(np.array([[0.9, 0.2], [0.2, 0.3]]), model.omega_, atol=0.3) TODO: find more stable test
assert_raise_message(ValueError, 'regularization must be a positive float',
GrmlvqModel(regularization=-1.0).fit, iris.data,
iris.target)
assert_raise_message(ValueError,
'initial matrix has wrong number of features',
GrmlvqModel(
initial_matrix=[[1, 2], [3, 4], [5, 6]]).fit,
iris.data, iris.target)
assert_raise_message(ValueError, 'dim must be an positive int',
GrmlvqModel(dim=0).fit, iris.data, iris.target)
GrmlvqModel(dim=1, prototypes_per_class=2).fit(
iris.data, iris.target)
def test_lgmlvq_iris():
check_estimator(LgmlvqModel)
model = LgmlvqModel()
model.fit(iris.data, iris.target)
assert_greater(model.score(iris.data, iris.target), score)
assert_raise_message(ValueError, 'regularization must be a positive float',
LgmlvqModel(regularization=-1.0).fit, iris.data,
iris.target)
assert_raise_message(ValueError,
'length of regularization'
' must be number of prototypes',
LgmlvqModel(regularization=[-1.0]).fit, iris.data,
iris.target)
assert_raise_message(ValueError,
'length of regularization must be number of classes',
LgmlvqModel(regularization=[-1.0],
classwise=True).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'initial matrices must be a list',
LgmlvqModel(initial_matrices=np.array(
[[1, 2], [3, 4], [5, 6]])).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'length of matrices wrong',
LgmlvqModel(
initial_matrices=[[[1, 2], [3, 4], [5, 6]]]).fit,
iris.data, iris.target)
assert_raise_message(ValueError, 'each matrix must have',
LgmlvqModel(
initial_matrices=[[[1]], [[1]], [[1]]]).fit,
iris.data, iris.target)
assert_raise_message(ValueError, 'length of matrices wrong',
LgmlvqModel(initial_matrices=[[[1, 2, 3]]],
classwise=True).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'each matrix must have',
LgmlvqModel(initial_matrices=[[[1]], [[1]], [[1]]],
classwise=True).fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'classwise must be a boolean',
LgmlvqModel(classwise="a").fit, iris.data,
iris.target)
assert_raise_message(ValueError, 'dim must be a list of positive ints',
LgmlvqModel(dim=[-1]).fit, iris.data, iris.target)
assert_raise_message(ValueError, 'dim length must be number of prototypes',
LgmlvqModel(dim=[1, 1]).fit, iris.data, iris.target)
assert_raise_message(ValueError, 'dim length must be number of classes',
LgmlvqModel(dim=[1, 1], classwise=True).fit,
iris.data, iris.target)
LgmlvqModel(classwise=True, dim=[1], prototypes_per_class=2).fit(
iris.data, iris.target)
model = LgmlvqModel(regularization=0.1)
model.fit(iris.data, iris.target)
model = LgmlvqModel(initial_prototypes=[[0, 2, 1], [1, 6, 2]],
initial_matrices=[np.ones([1, 2]), np.ones([1, 2])],
dim=[1, 1])
x = np.array([[0, 0], [0, 4], [1, 4], [1, 8]])
y = np.array([1, 1, 2, 2])
model.fit(x, y) | bsd-3-clause |
manashmndl/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
zymsys/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelFrame.py | 22 | 2847 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
maxnpeaksTwm = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
pYh = np.unwrap(np.angle(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
pXr = np.unwrap(np.angle(Xr[:Ns/2]))
xrw = np.real(fftshift(ifft(Xr))) * H * 2
yhw = np.real(fftshift(ifft(Yh))) * H * 2
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,2,1)
plt.plot(np.arange(M), x[pos-hM1:pos+hM2]*w, lw=1.5)
plt.axis([0, M, min(x[pos-hM1:pos+hM2]*w), max(x[pos-hM1:pos+hM2]*w)])
plt.title('x (flute-A4.wav)')
plt.subplot(3,2,3)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-90,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(3,2,5)
plt.plot(binFreq,pX,'c', lw=1.5)
plt.axis([0,maxplotfreq,0,16])
plt.plot(hfreq, hphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + harmonics')
plt.subplot(3,2,4)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.8, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.5, label='mXr')
plt.axis([0,maxplotfreq,-90,max(mYh)+2])
plt.legend(prop={'size':10})
plt.title('mYh + mXr')
plt.subplot(3,2,6)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,pYh,'c', lw=.8, label='pYh')
plt.plot(binFreq,pXr,'c', lw=1.5, label ='pXr')
plt.axis([0,maxplotfreq,-5,25])
plt.legend(prop={'size':10})
plt.title('pYh + pXr')
plt.subplot(3,2,2)
plt.plot(np.arange(Ns), yhw, 'b', lw=.8, label='yh')
plt.plot(np.arange(Ns), xrw, 'b', lw=1.5, label='xr')
plt.axis([0, Ns, min(yhw), max(yhw)])
plt.legend(prop={'size':10})
plt.title('yh + xr')
plt.tight_layout()
plt.savefig('hprModelFrame.png')
plt.show()
| agpl-3.0 |
robin-lai/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
voxlol/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed:
* scikit-learn
Does two benchmarks:
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
Winand/pandas | pandas/tests/scalar/test_period_asfreq.py | 15 | 35624 | import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(object):
"""Test frequency conversion of date objects"""
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
assert result1.ordinal == expected.ordinal
assert result1.freqstr == '5T'
assert result2.ordinal == expected.ordinal
assert result2.freqstr == 'T'
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert ival_A.asfreq('Q', 'S') == ival_A_to_Q_start
assert ival_A.asfreq('Q', 'e') == ival_A_to_Q_end
assert ival_A.asfreq('M', 's') == ival_A_to_M_start
assert ival_A.asfreq('M', 'E') == ival_A_to_M_end
assert ival_A.asfreq('W', 'S') == ival_A_to_W_start
assert ival_A.asfreq('W', 'E') == ival_A_to_W_end
assert ival_A.asfreq('B', 'S') == ival_A_to_B_start
assert ival_A.asfreq('B', 'E') == ival_A_to_B_end
assert ival_A.asfreq('D', 'S') == ival_A_to_D_start
assert ival_A.asfreq('D', 'E') == ival_A_to_D_end
assert ival_A.asfreq('H', 'S') == ival_A_to_H_start
assert ival_A.asfreq('H', 'E') == ival_A_to_H_end
assert ival_A.asfreq('min', 'S') == ival_A_to_T_start
assert ival_A.asfreq('min', 'E') == ival_A_to_T_end
assert ival_A.asfreq('T', 'S') == ival_A_to_T_start
assert ival_A.asfreq('T', 'E') == ival_A_to_T_end
assert ival_A.asfreq('S', 'S') == ival_A_to_S_start
assert ival_A.asfreq('S', 'E') == ival_A_to_S_end
assert ival_AJAN.asfreq('D', 'S') == ival_AJAN_to_D_start
assert ival_AJAN.asfreq('D', 'E') == ival_AJAN_to_D_end
assert ival_AJUN.asfreq('D', 'S') == ival_AJUN_to_D_start
assert ival_AJUN.asfreq('D', 'E') == ival_AJUN_to_D_end
assert ival_ANOV.asfreq('D', 'S') == ival_ANOV_to_D_start
assert ival_ANOV.asfreq('D', 'E') == ival_ANOV_to_D_end
assert ival_A.asfreq('A') == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert ival_Q.asfreq('A') == ival_Q_to_A
assert ival_Q_end_of_year.asfreq('A') == ival_Q_to_A
assert ival_Q.asfreq('M', 'S') == ival_Q_to_M_start
assert ival_Q.asfreq('M', 'E') == ival_Q_to_M_end
assert ival_Q.asfreq('W', 'S') == ival_Q_to_W_start
assert ival_Q.asfreq('W', 'E') == ival_Q_to_W_end
assert ival_Q.asfreq('B', 'S') == ival_Q_to_B_start
assert ival_Q.asfreq('B', 'E') == ival_Q_to_B_end
assert ival_Q.asfreq('D', 'S') == ival_Q_to_D_start
assert ival_Q.asfreq('D', 'E') == ival_Q_to_D_end
assert ival_Q.asfreq('H', 'S') == ival_Q_to_H_start
assert ival_Q.asfreq('H', 'E') == ival_Q_to_H_end
assert ival_Q.asfreq('Min', 'S') == ival_Q_to_T_start
assert ival_Q.asfreq('Min', 'E') == ival_Q_to_T_end
assert ival_Q.asfreq('S', 'S') == ival_Q_to_S_start
assert ival_Q.asfreq('S', 'E') == ival_Q_to_S_end
assert ival_QEJAN.asfreq('D', 'S') == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq('D', 'E') == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq('D', 'S') == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq('D', 'E') == ival_QEJUN_to_D_end
assert ival_Q.asfreq('Q') == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
assert ival_M.asfreq('A') == ival_M_to_A
assert ival_M_end_of_year.asfreq('A') == ival_M_to_A
assert ival_M.asfreq('Q') == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq('Q') == ival_M_to_Q
assert ival_M.asfreq('W', 'S') == ival_M_to_W_start
assert ival_M.asfreq('W', 'E') == ival_M_to_W_end
assert ival_M.asfreq('B', 'S') == ival_M_to_B_start
assert ival_M.asfreq('B', 'E') == ival_M_to_B_end
assert ival_M.asfreq('D', 'S') == ival_M_to_D_start
assert ival_M.asfreq('D', 'E') == ival_M_to_D_end
assert ival_M.asfreq('H', 'S') == ival_M_to_H_start
assert ival_M.asfreq('H', 'E') == ival_M_to_H_end
assert ival_M.asfreq('Min', 'S') == ival_M_to_T_start
assert ival_M.asfreq('Min', 'E') == ival_M_to_T_end
assert ival_M.asfreq('S', 'S') == ival_M_to_S_start
assert ival_M.asfreq('S', 'E') == ival_M_to_S_end
assert ival_M.asfreq('M') == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, hour=23,
minute=59, second=59)
assert ival_W.asfreq('A') == ival_W_to_A
assert ival_W_end_of_year.asfreq('A') == ival_W_to_A_end_of_year
assert ival_W.asfreq('Q') == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq('Q') == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq('M') == ival_W_to_M
assert ival_W_end_of_month.asfreq('M') == ival_W_to_M_end_of_month
assert ival_W.asfreq('B', 'S') == ival_W_to_B_start
assert ival_W.asfreq('B', 'E') == ival_W_to_B_end
assert ival_W.asfreq('D', 'S') == ival_W_to_D_start
assert ival_W.asfreq('D', 'E') == ival_W_to_D_end
assert ival_WSUN.asfreq('D', 'S') == ival_WSUN_to_D_start
assert ival_WSUN.asfreq('D', 'E') == ival_WSUN_to_D_end
assert ival_WSAT.asfreq('D', 'S') == ival_WSAT_to_D_start
assert ival_WSAT.asfreq('D', 'E') == ival_WSAT_to_D_end
assert ival_WFRI.asfreq('D', 'S') == ival_WFRI_to_D_start
assert ival_WFRI.asfreq('D', 'E') == ival_WFRI_to_D_end
assert ival_WTHU.asfreq('D', 'S') == ival_WTHU_to_D_start
assert ival_WTHU.asfreq('D', 'E') == ival_WTHU_to_D_end
assert ival_WWED.asfreq('D', 'S') == ival_WWED_to_D_start
assert ival_WWED.asfreq('D', 'E') == ival_WWED_to_D_end
assert ival_WTUE.asfreq('D', 'S') == ival_WTUE_to_D_start
assert ival_WTUE.asfreq('D', 'E') == ival_WTUE_to_D_end
assert ival_WMON.asfreq('D', 'S') == ival_WMON_to_D_start
assert ival_WMON.asfreq('D', 'E') == ival_WMON_to_D_end
assert ival_W.asfreq('H', 'S') == ival_W_to_H_start
assert ival_W.asfreq('H', 'E') == ival_W_to_H_end
assert ival_W.asfreq('Min', 'S') == ival_W_to_T_start
assert ival_W.asfreq('Min', 'E') == ival_W_to_T_end
assert ival_W.asfreq('S', 'S') == ival_W_to_S_start
assert ival_W.asfreq('S', 'E') == ival_W_to_S_end
assert ival_W.asfreq('W') == ival_W
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
ival_W.asfreq('WK')
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=1)
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK-SAT', year=2007, month=1, day=6)
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK-FRI', year=2007, month=1, day=5)
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK-THU', year=2007, month=1, day=4)
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK-WED', year=2007, month=1, day=3)
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK-TUE', year=2007, month=1, day=2)
with tm.assert_raises_regex(ValueError, msg):
Period(freq='WK-MON', year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
assert ival_B.asfreq('A') == ival_B_to_A
assert ival_B_end_of_year.asfreq('A') == ival_B_to_A
assert ival_B.asfreq('Q') == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq('Q') == ival_B_to_Q
assert ival_B.asfreq('M') == ival_B_to_M
assert ival_B_end_of_month.asfreq('M') == ival_B_to_M
assert ival_B.asfreq('W') == ival_B_to_W
assert ival_B_end_of_week.asfreq('W') == ival_B_to_W
assert ival_B.asfreq('D') == ival_B_to_D
assert ival_B.asfreq('H', 'S') == ival_B_to_H_start
assert ival_B.asfreq('H', 'E') == ival_B_to_H_end
assert ival_B.asfreq('Min', 'S') == ival_B_to_T_start
assert ival_B.asfreq('Min', 'E') == ival_B_to_T_end
assert ival_B.asfreq('S', 'S') == ival_B_to_S_start
assert ival_B.asfreq('S', 'E') == ival_B_to_S_end
assert ival_B.asfreq('B') == ival_B
def test_conv_daily(self):
# frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
assert ival_D.asfreq('A') == ival_D_to_A
assert ival_D_end_of_quarter.asfreq('A-JAN') == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq('A-JUN') == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq('A-DEC') == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq('A') == ival_D_to_A
assert ival_D_end_of_quarter.asfreq('Q') == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq('M') == ival_D_to_M
assert ival_D_end_of_month.asfreq('M') == ival_D_to_M
assert ival_D.asfreq('W') == ival_D_to_W
assert ival_D_end_of_week.asfreq('W') == ival_D_to_W
assert ival_D_friday.asfreq('B') == ival_B_friday
assert ival_D_saturday.asfreq('B', 'S') == ival_B_friday
assert ival_D_saturday.asfreq('B', 'E') == ival_B_monday
assert ival_D_sunday.asfreq('B', 'S') == ival_B_friday
assert ival_D_sunday.asfreq('B', 'E') == ival_B_monday
assert ival_D.asfreq('H', 'S') == ival_D_to_H_start
assert ival_D.asfreq('H', 'E') == ival_D_to_H_end
assert ival_D.asfreq('Min', 'S') == ival_D_to_T_start
assert ival_D.asfreq('Min', 'E') == ival_D_to_T_end
assert ival_D.asfreq('S', 'S') == ival_D_to_S_start
assert ival_D.asfreq('S', 'E') == ival_D_to_S_end
assert ival_D.asfreq('D') == ival_D
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=59, second=59)
assert ival_H.asfreq('A') == ival_H_to_A
assert ival_H_end_of_year.asfreq('A') == ival_H_to_A
assert ival_H.asfreq('Q') == ival_H_to_Q
assert ival_H_end_of_quarter.asfreq('Q') == ival_H_to_Q
assert ival_H.asfreq('M') == ival_H_to_M
assert ival_H_end_of_month.asfreq('M') == ival_H_to_M
assert ival_H.asfreq('W') == ival_H_to_W
assert ival_H_end_of_week.asfreq('W') == ival_H_to_W
assert ival_H.asfreq('D') == ival_H_to_D
assert ival_H_end_of_day.asfreq('D') == ival_H_to_D
assert ival_H.asfreq('B') == ival_H_to_B
assert ival_H_end_of_bus.asfreq('B') == ival_H_to_B
assert ival_H.asfreq('Min', 'S') == ival_H_to_T_start
assert ival_H.asfreq('Min', 'E') == ival_H_to_T_end
assert ival_H.asfreq('S', 'S') == ival_H_to_S_start
assert ival_H.asfreq('S', 'E') == ival_H_to_S_end
assert ival_H.asfreq('H') == ival_H
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=59)
assert ival_T.asfreq('A') == ival_T_to_A
assert ival_T_end_of_year.asfreq('A') == ival_T_to_A
assert ival_T.asfreq('Q') == ival_T_to_Q
assert ival_T_end_of_quarter.asfreq('Q') == ival_T_to_Q
assert ival_T.asfreq('M') == ival_T_to_M
assert ival_T_end_of_month.asfreq('M') == ival_T_to_M
assert ival_T.asfreq('W') == ival_T_to_W
assert ival_T_end_of_week.asfreq('W') == ival_T_to_W
assert ival_T.asfreq('D') == ival_T_to_D
assert ival_T_end_of_day.asfreq('D') == ival_T_to_D
assert ival_T.asfreq('B') == ival_T_to_B
assert ival_T_end_of_bus.asfreq('B') == ival_T_to_B
assert ival_T.asfreq('H') == ival_T_to_H
assert ival_T_end_of_hour.asfreq('H') == ival_T_to_H
assert ival_T.asfreq('S', 'S') == ival_T_to_S_start
assert ival_T.asfreq('S', 'E') == ival_T_to_S_end
assert ival_T.asfreq('Min') == ival_T
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1, hour=0, minute=0,
second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
assert ival_S.asfreq('A') == ival_S_to_A
assert ival_S_end_of_year.asfreq('A') == ival_S_to_A
assert ival_S.asfreq('Q') == ival_S_to_Q
assert ival_S_end_of_quarter.asfreq('Q') == ival_S_to_Q
assert ival_S.asfreq('M') == ival_S_to_M
assert ival_S_end_of_month.asfreq('M') == ival_S_to_M
assert ival_S.asfreq('W') == ival_S_to_W
assert ival_S_end_of_week.asfreq('W') == ival_S_to_W
assert ival_S.asfreq('D') == ival_S_to_D
assert ival_S_end_of_day.asfreq('D') == ival_S_to_D
assert ival_S.asfreq('B') == ival_S_to_B
assert ival_S_end_of_bus.asfreq('B') == ival_S_to_B
assert ival_S.asfreq('H') == ival_S_to_H
assert ival_S_end_of_hour.asfreq('H') == ival_S_to_H
assert ival_S.asfreq('Min') == ival_S_to_T
assert ival_S_end_of_minute.asfreq('Min') == ival_S_to_T
assert ival_S.asfreq('S') == ival_S
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq='A', year=2007)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('2007', freq='3A')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='3A')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# mult freq to normal freq
p = Period(freq='3A', year=2007)
# ordinal will change because how=E is the default
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq)
expected = Period('2009', freq='A')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# ordinal will not change
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='A')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
p = Period(freq='A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2007-12', freq='2M')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
p = Period(freq='3A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2009-12', freq='2M')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
def test_asfreq_combined(self):
# normal freq to combined freq
p = Period('2007', freq='H')
# ordinal will not change
expected = Period('2007', freq='25H')
for freq, how in zip(['1D1H', '1H1D'], ['E', 'S']):
result = p.asfreq(freq, how=how)
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# combined freq to normal freq
p1 = Period(freq='1D1H', year=2007)
p2 = Period(freq='1H1D', year=2007)
# ordinal will change because how=E is the default
result1 = p1.asfreq('H')
result2 = p2.asfreq('H')
expected = Period('2007-01-02', freq='H')
assert result1 == expected
assert result1.ordinal == expected.ordinal
assert result1.freq == expected.freq
assert result2 == expected
assert result2.ordinal == expected.ordinal
assert result2.freq == expected.freq
# ordinal will not change
result1 = p1.asfreq('H', how='S')
result2 = p2.asfreq('H', how='S')
expected = Period('2007-01-01', freq='H')
assert result1 == expected
assert result1.ordinal == expected.ordinal
assert result1.freq == expected.freq
assert result2 == expected
assert result2.ordinal == expected.ordinal
assert result2.freq == expected.freq
def test_asfreq_MS(self):
initial = Period("2013")
assert initial.asfreq(freq="M", how="S") == Period('2013-01', 'M')
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
initial.asfreq(freq="MS", how="S")
with tm.assert_raises_regex(ValueError, msg):
pd.Period('2013-01', 'MS')
assert _period_code_map.get("MS") is None
| bsd-3-clause |
larsmans/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
jerome-nexedi/pulp-or | doc/source/_static/plotter.py | 4 | 1267 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from matplotlib import rc
# imports for the pylab-style names used below (assumed; the original script
# appears to rely on an implicit `from pylab import *`)
from numpy import arange
from matplotlib.patches import Arrow
from matplotlib.pyplot import (axis, figure, gca, plot, savefig, subplot,
subplots_adjust, text, title, yticks)
rc('text', usetex=True)
rc('font', family='serif')
def plot_interval(a,c,x_left, x_right,i, fbound):
lh = c*(1-a[0])
rh = c*(1+a[1])
x=arange(x_left, x_right+1)
y=0*x
arrow_r = Arrow(c,0, c*a[1],0,0.2)
arrow_l = Arrow(c,0,-c*a[0],0,0.2)
plot(x,y)
text((x_left+lh)/2.0,0.1,'freebound interval [%s, %s] is penalty-free' % (lh,rh))
text((x_left+lh)/2.0, 0.2, 'rhs=%s, %s' % (c, fbound))
cur_ax = gca()
cur_ax.add_patch(arrow_l)
cur_ax.add_patch(arrow_r)
axis([x_left,x_right,-0.1,0.3])
yticks([])
title('Elasticized constraint\_%s $C(x)= %s $' % (i, c))
figure()
subplots_adjust(hspace=0.5)
fbound = 'proportionFreeBound'
i=1
subplot(2,1,i)
a=[0.01,0.01]
c = 200
x_left = 0.97*c
x_right = 1.03*c
fb_string = '%s%s = %s' %(fbound,'', a[0])
plot_interval(a,c,x_left, x_right,i, fb_string)
i += 1
subplot(2,1,i)
a=[0.02, 0.05]
c = 500
x_left = 0.9*c #scale of window
x_right = 1.2*c #scale of window
fb_string = '%s%s = [%s,%s]' % (fbound,'List', a[0],a[1])
plot_interval(a,c,x_left, x_right,i, fb_string)
savefig('freebound.jpg')
savefig('freebound.pdf')
# vim: fenc=utf-8: ft=python:sw=4:et:nu:fdm=indent:fdn=1:syn=python
| mit |
rgommers/statsmodels | statsmodels/miscmodels/try_mlecov.py | 33 | 7414 | '''Multivariate Normal Model with full covariance matrix
toeplitz structure is not exploited, need cholesky or inv for toeplitz
Author: josef-pktd
'''
from __future__ import print_function
import numpy as np
#from scipy import special #, stats
from scipy import linalg
from scipy.linalg import norm, toeplitz
import statsmodels.api as sm
from statsmodels.base.model import (GenericLikelihoodModel,
LikelihoodModel)
from statsmodels.tsa.arima_process import arma_acovf, arma_generate_sample
def mvn_loglike_sum(x, sigma):
'''loglike multivariate normal
copied from GLS and adjusted names
    not sure why this differs from mvn_loglike
'''
nobs = len(x)
nobs2 = nobs / 2.0
SSR = (x**2).sum()
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(sigma) and sigma.ndim == 2:
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
llf -= .5*np.log(np.linalg.det(sigma))
return llf
def mvn_loglike(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = linalg.inv(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
llf = - np.dot(x, np.dot(sigmainv, x))
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf
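# Illustrative check (comment only): with sigma equal to the identity this
# reduces to -0.5 * (x.dot(x) + nobs * np.log(2 * np.pi)), i.e.
#   mvn_loglike(x, np.eye(len(x))) == np.log(stats.norm.pdf(x)).sum()
# the sum of independent standard-normal log densities.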
def mvn_loglike_chol(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
x_whitened = np.dot(cholsigmainv, x)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
from scipy import stats
print('scipy.stats')
print(np.log(stats.norm.pdf(x_whitened)).sum())
llf = - np.dot(x_whitened.T, x_whitened)
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf, logdetsigma, 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
#0.5 * np.dot(x_whitened.T, x_whitened) + nobs * np.log(2 * np.pi) + logdetsigma)
def mvn_nloglike_obs(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
#Still wasteful to calculate pinv first
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
#2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
# logdet not needed ???
#logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
x_whitened = np.dot(cholsigmainv, x)
#sigmainv = linalg.cholesky(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
return llike
def invertibleroots(ma):
import numpy.polynomial as poly
pr = poly.polyroots(ma)
insideroots = np.abs(pr)<1
if insideroots.any():
pr[np.abs(pr)<1] = 1./pr[np.abs(pr)<1]
pnew = poly.Polynomial.fromroots(pr)
        mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = ma
wasinvertible = True
return mainv, wasinvertible
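# Worked example (illustrative): ma = [1.0, 2.0] has its root at -0.5, inside
# the unit circle, so it is not invertible; the root is flipped to -2.0 and the
# coefficients rescaled, giving mainv = [1.0, 0.5] and wasinvertible = False.
# An already invertible ma such as [1.0, 0.5] is returned unchanged.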
def getpoly(self, params):
ar = np.r_[[1], -params[:self.nar]]
ma = np.r_[[1], params[-self.nma:]]
import numpy.polynomial as poly
return poly.Polynomial(ar), poly.Polynomial(ma)
class MLEGLS(GenericLikelihoodModel):
    '''ARMA model with exact loglikelihood for short time series
Inverts (nobs, nobs) matrix, use only for nobs <= 200 or so.
This class is a pattern for small sample GLS-like models. Intended use
for loglikelihood of initial observations for ARMA.
TODO:
    This might be missing the error variance. Does it assume the error is
    distributed N(0, 1)?
Maybe extend to mean handling, or assume it is already removed.
'''
def _params2cov(self, params, nobs):
'''get autocovariance matrix from ARMA regression parameter
ar parameters are assumed to have rhs parameterization
'''
ar = np.r_[[1], -params[:self.nar]]
ma = np.r_[[1], params[-self.nma:]]
#print('ar', ar
#print('ma', ma
#print('nobs', nobs
autocov = arma_acovf(ar, ma, nobs=nobs)
#print('arma_acovf(%r, %r, nobs=%d)' % (ar, ma, nobs)
#print(autocov.shape
        #something is strange, fixed in arma_acovf
autocov = autocov[:nobs]
sigma = toeplitz(autocov)
return sigma
def loglike(self, params):
sig = self._params2cov(params[:-1], self.nobs)
sig = sig * params[-1]**2
loglik = mvn_loglike(self.endog, sig)
return loglik
def fit_invertible(self, *args, **kwds):
res = self.fit(*args, **kwds)
ma = np.r_[[1], res.params[self.nar: self.nar+self.nma]]
mainv, wasinvertible = invertibleroots(ma)
if not wasinvertible:
start_params = res.params.copy()
start_params[self.nar: self.nar+self.nma] = mainv[1:]
#need to add args kwds
res = self.fit(start_params=start_params)
return res
if __name__ == '__main__':
nobs = 50
ar = [1.0, -0.8, 0.1]
ma = [1.0, 0.1, 0.2]
#ma = [1]
np.random.seed(9875789)
y = arma_generate_sample(ar,ma,nobs,2)
y -= y.mean() #I haven't checked treatment of mean yet, so remove
mod = MLEGLS(y)
mod.nar, mod.nma = 2, 2 #needs to be added, no init method
mod.nobs = len(y)
res = mod.fit(start_params=[0.1, -0.8, 0.2, 0.1, 1.])
print('DGP', ar, ma)
print(res.params)
from statsmodels.regression import yule_walker
print(yule_walker(y, 2))
#resi = mod.fit_invertible(start_params=[0.1,0,0.2,0, 0.5])
#print(resi.params
arpoly, mapoly = getpoly(mod, res.params[:-1])
data = sm.datasets.sunspots.load()
#ys = data.endog[-100:]
## ys = data.endog[12:]-data.endog[:-12]
## ys -= ys.mean()
## mods = MLEGLS(ys)
## mods.nar, mods.nma = 13, 1 #needs to be added, no init method
## mods.nobs = len(ys)
## ress = mods.fit(start_params=np.r_[0.4, np.zeros(12), [0.2, 5.]],maxiter=200)
## print(ress.params
## #from statsmodels.sandbox.tsa import arima as tsaa
## #tsaa
## import matplotlib.pyplot as plt
## plt.plot(data.endog[1])
## #plt.show()
sigma = mod._params2cov(res.params[:-1], nobs) * res.params[-1]**2
print(mvn_loglike(y, sigma))
llo = mvn_nloglike_obs(y, sigma)
print(llo.sum(), llo.shape)
print(mvn_loglike_chol(y, sigma))
print(mvn_loglike_sum(y, sigma))
| bsd-3-clause |
pmelchior/shear-stacking-tests | run_quadrant_check.py | 2 | 6519 | #!/bin/env python
import json, errno
import healpy as hp
import healpix_util as hu
import numpy as np
from sys import argv
from shear_stacking import *
def makeDensityMap(outfile, config, shapes, nside=512):
ipix = hp.ang2pix(nside, (90-shapes[config['shape_dec_key']])/180*np.pi, shapes[config['shape_ra_key']]/180*np.pi, nest=False)
bc = np.bincount(ipix, minlength=hp.nside2npix(nside))
hp.write_map(outfile, bc)
return bc
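# Note (illustrative): for nside=512 the output map has
# hp.nside2npix(512) == 12 * 512**2 == 3145728 pixels, each holding the raw
# count of shape-catalog objects falling into that HEALPix pixel.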
""" for plotting only"""
def lon2RA(lon):
lon = 360 - lon
hours = int(lon)/15
minutes = int(float(lon - hours*15)/15 * 60)
minutes = '{:>02}'.format(minutes)
return "%d:%sh" % (hours, minutes)
def getCountLocation(config, shapes, nside=512):
ipix = hp.ang2pix(nside, (90-shapes[config['shape_dec_key']])/180*np.pi, shapes[config['shape_ra_key']]/180*np.pi, nest=False)
bc = np.bincount(ipix)
pixels = np.nonzero(bc)[0]
bc = bc[bc>0] / hp.nside2resol(nside, arcmin=True)**2 # in arcmin^-2
theta, phi = hp.pix2ang(nside, pixels, nest=False)
lat = 90 - theta*180/np.pi
lon = phi*180/np.pi
return bc, lat, lon
from mpl_toolkits.basemap import Basemap
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def plotDensityMap(config, shapes, nside=512):
# set up figure
setTeXPlot(2*nside/512)
fig = plt.figure(figsize=(6.5*nside/512,6*nside/512))
ax = fig.add_axes([0.07,0.07,0.84,0.9], aspect='equal')
# equal-area map straight above the footprint center
m = Basemap(projection='aea',width=2000000,height=2200000,
lat_0=-52.5, lat_1=-61, lat_2=-42., lon_0=-75.)
# after cuts
vmin,vmax = 0,10
bc, lat, lon = getCountLocation(config, shapes, nside=nside)
x,y = m(-lon, lat)
sc = m.scatter(x,y,c=bc, linewidths=0, s=10, marker='s', cmap=cm.YlOrRd, vmin=vmin, vmax=vmax, rasterized=True, ax=ax)
#sc = m.scatter(x,y,c=bc, linewidths=0, s=8, marker='h', cmap=cm.jet, vmin=vmin, vmax=vmax, rasterized=True)#, norm=matplotlib.colors.LogNorm())
# draw parallels and meridians.
# label on left and bottom of map.
parallels = np.arange(-75.,0.,5.)
m.drawparallels(parallels,labels=[1,0,0,0], labelstyle="+/-", linewidth=0.5)
meridians = np.arange(0.,360.,5.)
m.drawmeridians(meridians,labels=[0,0,0,1], fmt=lon2RA, linewidth=0.5)
# add colorbar
cb = m.colorbar(sc,"right", size="3%", pad='0%')
cb.set_label('$n_g\ [\mathrm{arcmin}^{-2}]$')
cb.solids.set_edgecolor("face")
#plt.show()
plt.savefig('depth_map_quadrant_check.pdf', transparent=True)
plt.savefig('depth_map_quadrant_check.png')
""" end plotting """
if __name__ == '__main__':
# parse inputs
try:
configfile = argv[1]
except IndexError:
print "usage: " + argv[0] + " <config file>"
raise SystemExit
try:
fp = open(configfile)
print "opening configfile " + configfile
config = json.load(fp)
fp.close()
except IOError:
print "configfile " + configfile + " does not exist!"
raise SystemExit
if config['coords'] not in ['angular', 'physical']:
print "config: specify either 'angular' or 'physical' coordinates"
raise SystemExit
# see if we need to do anything
append_to_extra = False
try:
hdu = fitsio.FITS(config['lens_extra_file'])
columns = hdu[1].get_colnames()
hdu.close()
if 'quad_flags' in columns:
print "Quadrant check flags already in " + config['lens_extra_file']
print "Delete file if you want to regenerate them."
raise SystemExit
else:
append_to_extra = True
except (KeyError, IOError) as exc: # not in config or file doesn't exist
pass
# open shape catalog
outdir = os.path.dirname(configfile) + "/"
shapefile = config['shape_file']
    # since all selections are normally in the extra file (if present),
    # we speed up the process by using the extra file as the shape file and dropping the original
try:
extrafile = config['shape_file_extra']
config['shape_file'] = extrafile
del config['shape_file_extra']
except KeyError:
pass
shapes = getShapeCatalog(config, verbose=True)
if shapes.size:
basename = os.path.basename(shapefile)
basename = ".".join(basename.split(".")[:-1])
densityfile = outdir + basename + '_density.fits'
# make healpix map of density of all shapes
makeDensityMap(densityfile, config, shapes, nside=1024)
print "created healpix density map %s" % densityfile
dmap=hu.readDensityMap(densityfile)
plotDensityMap(config, shapes, nside=1024)
# open lens catalog for quadrant check
# we need to remove any lens cuts since we want the check for all
# lenses in the lens_file
config['lens_cuts'] = []
lenses = getLensCatalog(config, verbose=True)
# check quadrants around the input points
        # make sure the weighted position ellipticity in adjacent quadrants
        # is less than 0.05
ellip_max=0.05
data = np.zeros(lenses.size, dtype=[('quad_flags', 'i1')])
# match the outer radius to the range asked for stacking
if config['coords'] == "physical":
radius_degrees = Dist2Ang(config['maxrange'], lenses[config['lens_z_key']])
else:
radius_degrees = config['maxrange'] * np.ones(lenses.size)
for i in xrange(lenses.size):
lens = lenses[i]
data['quad_flags'][i] = dmap.check_quad(lens[config['lens_ra_key']], lens[config['lens_dec_key']], radius_degrees[i], ellip_max)
# save result as table
if append_to_extra == False:
lensfile = config['lens_file']
basename = os.path.basename(lensfile)
basename = ".".join(basename.split(".")[:-1])
quadfile = outdir + basename + '_quadrant-check.fits'
fits = fitsio.FITS(quadfile, 'rw', clobber=True)
fits.write(data)
fits.close()
print "created quadrant check file %s" % quadfile
if 'lens_extra_file' not in config.keys():
print "\nBefore proceeding: add"
print " \"lens_extra_file\": \"%s\"" % quadfile
print "to your config file!"
else:
fits = fitsio.FITS(config['lens_extra_file'], 'rw')
fits[1].insert_column('quad_flags', data['quad_flags'])
fits.close()
| mit |
infoelliex/addons-yelizariev | import_custom/import_custom.py | 16 | 10226 | # -*- coding: utf-8 -*-
import logging
import os
_logger = logging.getLogger(__name__)
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base
try:
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.mapper import *
import re
import time
import datetime as DT
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import csv
import glob
from openerp.osv.fields import sanitize_binary_value
class fixdate_custom(mapper):
"""
    convert a 'm/d/yy' timestamp such as '12/31/10 13:26:25' to '2010-12-31'
"""
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
s = external_values.get(self.field_name)
if not s:
return ''
m,d,y = str(s).split(' ')[0].split('/')
return '20%s-%s-%s' % (y,m,d)
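# Illustrative example (assumes month/day/two-digit-year order in the source CSVs):
#   fixdate_custom('CreationDate')({'CreationDate': '12/31/10 13:26:25'})
# returns '2010-12-31'; an empty or missing value maps to ''.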
class image(mapper):
def __init__(self, val):
self.val = val
def __call__(self, external_values):
val = external_values.get(self.val)
files = glob.glob('/home/tmp/thumbs/%s_*' % val)
max_file = None
max_size = 0
for f in files:
size = os.path.getsize(f)
if size > 93000:
continue
if size < max_size:
continue
max_size = size
max_file = f
if not max_file:
return None
with open(max_file, 'r') as f:
b = f.read()
val = sanitize_binary_value(b)
return val
class import_custom(import_base):
TABLE_PROSPECTS = 'prospects_burda'
TABLE_PROSPECTS_TAG = TABLE_PROSPECTS + '_tag'
TABLE_PRODUCT = 'products'
TABLE_PRODUCT_CATEGORY = 'categories'
COL_LINE_NUM = 'line_num'
def initialize(self):
self.csv_files = self.context.get('csv_files')
self.import_options.update({'separator':',',
#'quoting':''
})
def get_data(self, table):
file_name = filter(lambda f: f.endswith('/%s.csv' % table), self.csv_files)
if file_name:
_logger.info('read file "%s"' % ( '%s.csv' % table))
file_name = file_name[0]
else:
_logger.info('file not found %s' % ( '%s.csv' % table))
return []
with open(file_name, 'rb') as csvfile:
            fixed_file = StringIO(csvfile.read().replace('\r\n', '\n'))
reader = csv.DictReader(fixed_file,
delimiter = self.import_options.get('separator'),
#quotechar = self.import_options.get('quoting'),
)
res = list(reader)
for line_num, line in enumerate(res):
line[self.COL_LINE_NUM] = str(line_num)
return res
def get_mapping(self):
return [
self.get_mapping_partners(),
self.get_mapping_product_categories(),
self.get_mapping_products(),
]
def get_table(self, table):
def f():
t = DataFrame(self.get_data(table))
#t = t[:10] # for debug
return t
return f
def get_hook_tag(self, field_name):
def f(external_values):
res = []
value = external_values.get(field_name)
value = value or ''
if not isinstance(value, basestring):
value = str(value)
for v in value.split(','):
#v = do_clean_sugar(v)
if v:
res.append({field_name:v})
return res
return f
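    # Illustrative example: the returned hook splits a comma-separated cell into
    # one record per tag, e.g. (with made-up values)
    #   self.get_hook_tag('Tag')({'Tag': 'press,retail'})
    # yields [{'Tag': 'press'}, {'Tag': 'retail'}].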
def tag(self, model, xml_id_prefix, field_name):
parent = xml_id_prefix + field_name
return {'model':model,
'hook':self.get_hook_tag(field_name),
'fields': {
'id': xml_id(parent, field_name),
'name': field_name,
#'parent_id/id':const('sugarcrm_migration.'+parent),
}
}
def get_mapping_partners(self):
return {
'name': self.TABLE_PROSPECTS,
'table': self.get_table(self.TABLE_PROSPECTS),
'dependencies' : [],
'models':[
self.tag('res.partner.category', self.TABLE_PROSPECTS_TAG, 'Tag'),
self.tag('res.partner.category', self.TABLE_PROSPECTS_TAG, 'Tags'),
self.tag('res.partner.category', self.TABLE_PROSPECTS_TAG, 'TypeName'),
{'model' : 'res.partner',
'fields': {
'id': xml_id(self.TABLE_PROSPECTS, 'External ID'),
'name': 'Name',
'lang': const('es_ES'),
'is_company': map_val('Is a Company', {'True':'1', 'False':'0'}, default='0'),
'customer': const('1'),
'supplier': const('0'),
'category_id/id': tags_from_fields(self.TABLE_PROSPECTS_TAG, ['Tag','Tags', 'TypeName']),
'street': 'Street',
'street2': 'Street2',
'zip': 'Zip',
'city': 'City',
'phone': 'Phone',
'mobile': 'Mobile',
'email': 'Email',
'country_id/.id': country_by_name('Country'),
'date': fixdate_custom('CreationDate'),
'comment': ppconcat('Subscription'),
}
},
{'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('ContactLastname', 'ContactEmail'),
'fields': {
'id': xml_id(self.TABLE_PROSPECTS+'_child', 'External ID'),
'parent_id/id': xml_id(self.TABLE_PROSPECTS, 'External ID'),
'name': concat('ContactTitle', 'ContactFirstname', 'ContactLastname', delimiter=' '),
'customer': const('1'),
'supplier': const('0'),
'function': 'ContactJobtitle',
'phone': 'ContactPhone',
'fax': 'ContactFax',
'email': 'ContactEmail',
'lang': const('es_ES'),
'comment': ppconcat('ContactGender'),
}
}
]
}
def get_mapping_product_categories(self):
return {
'name': self.TABLE_PRODUCT_CATEGORY,
'table': self.get_table(self.TABLE_PRODUCT_CATEGORY),
'dependencies' : [],
'models':[
{'model' : 'product.public.category',
'fields': {
'id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'id'),
'name': 'label',
},
},
{'model' : 'product.public.category',
'hook': lambda vals: vals.get('parent_id')!='NULL' and vals or None,
'fields': {
'id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'id'),
'name': 'label',
'parent_id/id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'parent_id'),
},
},
]
}
def table_product(self):
t = DataFrame(self.get_data('ecom_items'))
t = merge(t,
DataFrame(self.get_data('ecom_items_ref')),
how='left',
left_on='ID',
suffixes=('', '_ref'),
right_on='ecom_items_id')
t = merge(t,
DataFrame(self.get_data('item_categories')),
how='left',
left_on='ID',
suffixes=('', '_categories'),
right_on='ecom_items_id')
#t = merge(t,
# DataFrame(self.get_data('thumbs')),
# how='left',
# left_on='id', # from ecom_items_ref
# suffixes=('', '_thumbs'),
# right_on='ecom_items_ref_id')
#t = t[:500] # for debug
return t
def get_mapping_products(self):
return {
'name': self.TABLE_PRODUCT,
'table': self.table_product,
'dependencies' : [self.TABLE_PRODUCT_CATEGORY],
'models':[
{'model':'product.category',
'fields': {
'id': xml_id(self.TABLE_PRODUCT + '_brand', 'Brand'),
'name': 'Brand',
}
},
{'model' : 'product.product',
'split' : 1000,
'fields': {
'id': xml_id(self.TABLE_PRODUCT, 'ID'),
'categ_id/id': xml_id(self.TABLE_PRODUCT + '_brand', 'Brand'),
'name': 'Label',
'website_published': 'published',
'default_code': 'ID',
'standard_price': 'price_purchase',
'lst_price': 'price_sales',
'active': lambda record: not int(record['disabled']),
'public_categ_id/id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'ecom_category_id'),
'image_medium': image('id'),
'description': ppconcat(
'color',
'weight',
'size',
'custom_code',
#'price_purchase',
'vat_code',
#'price_sales',
'stock_min',
'stock_max',
'packaging',
'packaging_pro',
'packaging_public',
'tags',
'eco_tax',
'EAN_code',
'disabled',
'body'
),
},
}
]
}
| lgpl-3.0 |
plissonf/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
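# Rough usage sketch (comment only; D, X and rng are illustrative placeholders):
#   rng = np.random.RandomState(0)
#   D = rng.randn(3, 8)
#   D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # unit-norm atoms
#   codes = sparse_encode(rng.randn(5, 8), D, algorithm='omp', n_nonzero_coefs=2)
#   # codes.shape == (5, 3), i.e. (n_samples, n_components)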
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
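# Rough usage sketch (comment only; rng is an illustrative placeholder):
#   rng = np.random.RandomState(0)
#   code, dictionary, errors = dict_learning(rng.randn(30, 10), n_components=5,
#                                            alpha=1, random_state=0)
#   # code.shape == (30, 5), dictionary.shape == (5, 10), and the data are
#   # approximated by np.dot(code, dictionary).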
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
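# Rough usage sketch (comment only; rng is an illustrative placeholder): with
# the defaults the online variant also returns (code, dictionary):
#   code, dictionary = dict_learning_online(rng.randn(100, 20), n_components=8,
#                                           alpha=1, n_iter=50, batch_size=5,
#                                           random_state=0)
#   # code.shape == (100, 8), dictionary.shape == (8, 20)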
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
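# Rough usage sketch (comment only; D and X are illustrative placeholders):
#   coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars',
#                       transform_alpha=0.1)
#   codes = coder.fit(X).transform(X)   # shape (n_samples, D.shape[0])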
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
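# Rough usage sketch (comment only; X is an illustrative placeholder):
#   dico = DictionaryLearning(n_components=12, alpha=1, max_iter=200,
#                             random_state=0).fit(X)
#   # dico.components_ has shape (12, n_features); dico.transform(X) returns the
#   # sparse codes under the default 'omp' transform algorithm.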
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
| bsd-3-clause |
jaeilepp/mne-python | mne/viz/tests/test_utils.py | 3 | 4893 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_allclose
from mne.viz.utils import (compare_fiff, _fake_click, _compute_scalings,
_validate_if_list_of_axes)
from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
from mne.utils import run_tests_if_main
from mne.io import read_raw_fif
from mne.event import read_events
from mne.epochs import Epochs
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
ev_fname = op.join(base_dir, 'test_raw-eve.fif')
def test_mne_analyze_colormap():
"""Test mne_analyze_colormap."""
assert_raises(ValueError, mne_analyze_colormap, [0])
assert_raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
assert_raises(ValueError, mne_analyze_colormap, [0, 2, 1])
def test_compare_fiff():
import matplotlib.pyplot as plt
compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
plt.close('all')
def test_clickable_image():
"""Test the ClickableImage class."""
# Gen data and create clickable image
import matplotlib.pyplot as plt
im = np.random.RandomState(0).randn(100, 100)
clk = ClickableImage(im)
clicks = [(12, 8), (46, 48), (10, 24)]
# Generate clicks
for click in clicks:
_fake_click(clk.fig, clk.ax, click, xform='data')
assert_allclose(np.array(clicks), np.array(clk.coords))
assert_true(len(clicks) == len(clk.coords))
# Exporting to layout
lt = clk.to_layout()
assert_true(lt.pos.shape[0] == len(clicks))
assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
clicks[1][0] / float(clicks[2][0]))
clk.plot_clicks()
plt.close('all')
def test_add_background_image():
"""Test adding background image to a figure."""
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
f, axs = plt.subplots(1, 2)
x, y = rng.randn(2, 10)
im = rng.randn(10, 10)
axs[0].scatter(x, y)
axs[1].scatter(y, x)
for ax in axs:
ax.set_aspect(1)
# Background without changing aspect
ax_im = add_background_image(f, im)
assert_true(ax_im.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 1)
# Background with changing aspect
ax_im_asp = add_background_image(f, im, set_ratios='auto')
assert_true(ax_im_asp.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 'auto')
# Make sure passing None as image returns None
assert_true(add_background_image(f, None) is None)
def test_auto_scale():
"""Test auto-scaling of channels for quick plotting."""
raw = read_raw_fif(raw_fname)
epochs = Epochs(raw, read_events(ev_fname))
rand_data = np.random.randn(10, 100)
for inst in [raw, epochs]:
scale_grad = 1e10
scalings_def = dict([('eeg', 'auto'), ('grad', scale_grad),
('stim', 'auto')])
# Test for wrong inputs
assert_raises(ValueError, inst.plot, scalings='foo')
assert_raises(ValueError, _compute_scalings, 'foo', inst)
# Make sure compute_scalings doesn't change anything not auto
scalings_new = _compute_scalings(scalings_def, inst)
assert_true(scale_grad == scalings_new['grad'])
assert_true(scalings_new['eeg'] != 'auto')
assert_raises(ValueError, _compute_scalings, scalings_def, rand_data)
epochs = epochs[0].load_data()
epochs.pick_types(eeg=True, meg=False)
assert_raises(ValueError, _compute_scalings,
dict(grad='auto'), epochs)
def test_validate_if_list_of_axes():
"""Test validation of axes."""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 2)
assert_raises(ValueError, _validate_if_list_of_axes, ax)
ax_flat = ax.ravel()
ax = ax.ravel().tolist()
_validate_if_list_of_axes(ax_flat)
_validate_if_list_of_axes(ax_flat, 4)
assert_raises(ValueError, _validate_if_list_of_axes, ax_flat, 5)
assert_raises(ValueError, _validate_if_list_of_axes, ax, 3)
assert_raises(ValueError, _validate_if_list_of_axes, 'error')
assert_raises(ValueError, _validate_if_list_of_axes, ['error'] * 2)
assert_raises(ValueError, _validate_if_list_of_axes, ax[0])
assert_raises(ValueError, _validate_if_list_of_axes, ax, 3)
ax_flat[2] = 23
assert_raises(ValueError, _validate_if_list_of_axes, ax_flat)
_validate_if_list_of_axes(ax, 4)
run_tests_if_main()
| bsd-3-clause |
cybernet14/scikit-learn | sklearn/utils/multiclass.py | 45 | 12390 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
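# --- Illustrative usage sketch (added note, not part of the original file) ---
# A typical partial_fit implementation delegates its first-call bookkeeping to
# this helper; the estimator below is hypothetical and only shows the calling
# pattern:
#
#     class MyOnlineClassifier(object):
#         def partial_fit(self, X, y, classes=None):
#             if _check_partial_fit_first_call(self, classes):
#                 # first call: classes_ has just been set, allocate state here
#                 self._init_state(n_classes=len(self.classes_))
#             # ... incremental update using X, y ...
#             return self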
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
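# --- Illustrative worked example (added note, not part of the original file) ---
# For a small dense multioutput target the function returns per-column classes
# and empirical priors:
#
#     >>> import numpy as np
#     >>> y = np.array([[1, 0],
#     ...               [2, 0],
#     ...               [1, 3]])
#     >>> classes, n_classes, priors = class_distribution(y)
#     >>> classes      # [array([1, 2]), array([0, 3])]
#     >>> n_classes    # [2, 2]
#     >>> priors       # [array([0.667, 0.333]), array([0.667, 0.333])] (approx.)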
| bsd-3-clause |
cython-testbed/pandas | pandas/io/formats/console.py | 3 | 4533 | """
Internal module for console introspection
"""
import sys
import locale
from pandas.io.formats.terminal import get_terminal_size
# -----------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
def detect_console_encoding():
"""
Try to find the most capable encoding supported by the console.
Slightly modified from the way IPython handles the same issue.
"""
global _initial_defencoding
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
except (AttributeError, IOError):
pass
# try again for something better
if not encoding or 'ascii' in encoding.lower():
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
# when all else fails. this will usually be "ascii"
if not encoding or 'ascii' in encoding.lower():
encoding = sys.getdefaultencoding()
# GH3360, save the reported defencoding at import time
# MPL backends may change it. Make available for debugging.
if not _initial_defencoding:
_initial_defencoding = sys.getdefaultencoding()
return encoding
def get_console_size():
"""Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
from pandas import get_option
display_width = get_option('display.width')
# deprecated.
display_height = get_option('display.max_rows')
# Consider
# interactive shell terminal, can detect term size
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
# size
# non-interactive script, should disregard term size
# in addition
# width,height have default values, but setting to 'None' signals
# should use Auto-Detection, But only in interactive shell-terminal.
# Simple. yeah.
if in_interactive_session():
if in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
terminal_width = get_default_val('display.width')
terminal_height = get_default_val('display.max_rows')
else:
# pure terminal
terminal_width, terminal_height = get_terminal_size()
else:
terminal_width, terminal_height = None, None
# Note if the User sets width/Height to None (auto-detection)
# and we're in a script (non-inter), this will return (None,None)
# caller needs to deal.
return (display_width or terminal_width, display_height or terminal_height)
# ----------------------------------------------------------------------
# Detect our environment
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
from pandas import get_option
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main() # noqa
except NameError:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
.. deprecated:: 0.14.1
This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except NameError:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
.. deprecated:: 0.14.1
This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except NameError:
return False
return False
def in_ipython_frontend():
"""
check if we're inside an an IPython zmq frontend
"""
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
except NameError:
pass
return False
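# --- Illustrative usage sketch (added block, not part of the original module) ---
# The helpers above can be combined to report how pandas sees the current
# console; this is a minimal sketch and only runs when the module is executed
# directly.
if __name__ == "__main__":
    print("console encoding:", detect_console_encoding())
    print("console size (w, h):", get_console_size())
    print("interactive session:", in_interactive_session())
    print("ipython frontend:", in_ipython_frontend())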
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional
Whether transform should produce scipy.sparse matrices.
True by default.
sort : boolean, optional
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
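# --- Illustrative note (added, not part of the original file) ---
# For D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] the loop above builds
# the CSR triplets before the optional column re-ordering (assuming the dicts
# are iterated in insertion order):
#
#     vocab   = {'foo': 0, 'bar': 1, 'baz': 2}
#     values  = [1, 2, 3, 1]
#     indices = [0, 1, 0, 2]
#     indptr  = [0, 2, 4]
#
# After sorting the feature names to ['bar', 'baz', 'foo'] and remapping the
# columns, the dense result matches the class docstring:
#     [[2., 0., 1.],
#      [0., 1., 3.]]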
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
adammenges/statsmodels | statsmodels/sandbox/examples/thirdparty/ex_ratereturn.py | 33 | 4394 | # -*- coding: utf-8 -*-
"""Playing with correlation of DJ-30 stock returns
this uses pickled data that needs to be created with findow.py
to see graphs, uncomment plt.show()
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
from statsmodels.compat.python import cPickle
import statsmodels.api as sm
import statsmodels.sandbox as sb
import statsmodels.sandbox.tools as sbtools
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
try:
rrdm = cPickle.load(file('dj30rr','rb'))
except Exception: #blanket for any unpickling error
print("Error with unpickling, a new pickle file can be created with findow_1")
raise
ticksym = rrdm.columns.tolist()
rr = rrdm.values[1:400]
rrcorr = np.corrcoef(rr, rowvar=0)
plot_corr(rrcorr, xnames=ticksym)
nvars = rrcorr.shape[0]
plt.figure()
plt.hist(rrcorr[np.triu_indices(nvars,1)])
plt.title('Correlation Coefficients')
xreda, facta, evaa, evea = sbtools.pcasvd(rr)
evallcs = (evaa).cumsum()
print(evallcs/evallcs[-1])
xred, fact, eva, eve = sbtools.pcasvd(rr, keepdim=4)
pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA')
resid = rr-xred
residcorr = np.corrcoef(resid, rowvar=0)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals')
plt.matshow(residcorr)
plt.imshow(residcorr, cmap=plt.cm.jet, interpolation='nearest',
extent=(0,30,0,30), vmin=-1.0, vmax=1.0)
plt.colorbar()
normcolor = (0,1) #False #True
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
plot_corr(rrcorr, xnames=ticksym, normcolor=normcolor, ax=ax)
ax2 = fig.add_subplot(2,2,3)
#pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA',
normcolor=normcolor, ax=ax2)
ax3 = fig.add_subplot(2,2,4)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals',
normcolor=normcolor, ax=ax3)
import matplotlib as mpl
images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
print(images)
print(ax.get_children())
#cax = fig.add_subplot(2,2,2)
#[0.85, 0.1, 0.075, 0.8]
fig.subplots_adjust(bottom=0.1, right=0.9, top=0.9)
cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
fig.colorbar(images[0], cax=cax)
fig.savefig('corrmatrixgrid.png', dpi=120)
has_sklearn = True
try:
import sklearn
except ImportError:
has_sklearn = False
print('sklearn not available')
def cov2corr(cov):
std_ = np.sqrt(np.diag(cov))
corr = cov / np.outer(std_, std_)
return corr
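# Added sanity check (illustrative, not part of the original script): cov2corr
# should map a 2x2 covariance with variances 4 and 9 and covariance 2 to an
# off-diagonal correlation of 2 / (2 * 3), i.e. roughly 0.333.
cov_demo = np.array([[4., 2.], [2., 9.]])
print(cov2corr(cov_demo))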
if has_sklearn:
from sklearn.covariance import LedoitWolf, OAS, MCD
lw = LedoitWolf(store_precision=False)
lw.fit(rr, assume_centered=False)
cov_lw = lw.covariance_
corr_lw = cov2corr(cov_lw)
oas = OAS(store_precision=False)
oas.fit(rr, assume_centered=False)
cov_oas = oas.covariance_
corr_oas = cov2corr(cov_oas)
mcd = MCD()#.fit(rr, reweight=None)
mcd.fit(rr, assume_centered=False)
cov_mcd = mcd.covariance_
corr_mcd = cov2corr(cov_mcd)
titles = ['raw correlation', 'lw', 'oas', 'mcd']
normcolor = None
fig = plt.figure()
for i, c in enumerate([rrcorr, corr_lw, corr_oas, corr_mcd]):
#for i, c in enumerate([np.cov(rr, rowvar=0), cov_lw, cov_oas, cov_mcd]):
ax = fig.add_subplot(2,2,i+1)
plot_corr(c, xnames=None, title=titles[i],
normcolor=normcolor, ax=ax)
images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
fig.subplots_adjust(bottom=0.1, right=0.9, top=0.9)
cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
fig.colorbar(images[0], cax=cax)
corrli = [rrcorr, corr_lw, corr_oas, corr_mcd, pcacorr]
diffssq = np.array([[((ci-cj)**2).sum() for ci in corrli]
for cj in corrli])
diffsabs = np.array([[np.max(np.abs(ci-cj)) for ci in corrli]
for cj in corrli])
print(diffssq)
print('\nmaxabs')
print(diffsabs)
fig.savefig('corrmatrix_sklearn.png', dpi=120)
fig2 = plot_corr_grid(corrli+[residcorr], ncols=3,
titles=titles+['pca', 'pca-residual'],
xnames=[], ynames=[])
fig2.savefig('corrmatrix_sklearn_2.png', dpi=120)
#plt.show()
#plt.close('all')
| bsd-3-clause |
lazywei/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
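# --- Added note (illustrative, not part of the original test module) ---
# The rescaling trick used above follows from the weighted ridge objective:
# minimizing sum_i w_i * (y_i - x_i . beta)**2 + alpha * ||beta||**2 is the
# same problem as ordinary ridge on the rescaled pairs
# (sqrt(w_i) * x_i, sqrt(w_i) * y_i), e.g.:
#
#     w = 1 + rng.rand(n_samples)
#     coef_w = ridge_regression(X, y, alpha=1.0, sample_weight=w)
#     coef_s = ridge_regression(X * np.sqrt(w)[:, np.newaxis],
#                               y * np.sqrt(w), alpha=1.0)
#     # coef_w and coef_s agree to numerical precision (no intercept fitted).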
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
fyffyt/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated with the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
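# --- Illustrative worked example (added note, not part of the original file) ---
# For three one-dimensional inputs the nonzero componentwise L1 distances and
# the corresponding index pairs are:
#
#     >>> X = np.array([[0.], [1.], [3.]])
#     >>> D, ij = l1_cross_distances(X)
#     >>> D.ravel()    # array([1., 3., 2.])
#     >>> ij           # array([[0, 1], [0, 2], [1, 2]])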
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
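As an illustrative continuation (``x_new``, ``y_pred`` and ``sigma2_pred``
are placeholder names, not part of the original example), predictions and
their mean squared error can then be requested from the fitted model:
>>> x_new = np.atleast_2d(np.linspace(1., 8., 5)).T
>>> y_pred, sigma2_pred = gp.predict(x_new, eval_MSE=True) # doctest: +SKIP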
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
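# Illustrative sketch of the batch path above (``gp`` and ``X_big`` are
# hypothetical names): for a fitted model and a large evaluation grid,
#   y_big, mse_big = gp.predict(X_big, eval_MSE=True, batch_size=1000)
# re-enters predict() once per slice with batch_size=None and fills the
# preallocated output arrays slice by slice.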
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Back up the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
nmartensen/pandas | doc/sphinxext/numpydoc/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
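# For illustration, a hypothetical two-figure script is split at plt.show():
#   split_code_at_show("plt.plot([1, 2])\nplt.show()\nplt.plot([3, 4])")
#   -> ["plt.plot([1, 2])\nplt.show()", "plt.plot([3, 4])"]
# Each returned piece is later executed and rendered separately by makefig().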
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
tbenthompson/codim1 | test/test_elastic_kernel.py | 1 | 13129 | from codim1.fast_lib import DisplacementKernel,\
TractionKernel,\
AdjointTractionKernel,\
HypersingularKernel,\
RegularizedHypersingularKernel,\
SemiRegularizedHypersingularKernel,\
double_integral
from codim1.core import *
import numpy as np
def test_kernel_set():
eks = ElasticKernelSet(1.0, 0.25)
assert(eks.k_d)
def test_traction_kernel_elements():
E = 1e5
nu = 0.3
shear_modulus = E / (2 * (1 + nu))
kernel = TractionKernel(shear_modulus, nu)
T = kernel.call(np.array([0, 4.7285]),
np.zeros(2),
np.array([-1.0, 0.0]))
exact = np.array([[0, 0.0096],[-0.0096, 0]])
np.testing.assert_almost_equal(exact, np.array(T), 4)
def test_displacement_symmetry():
kernel = DisplacementKernel(1.0, 0.25)
a = np.array(kernel.call(np.array([1.0, 0.5]),
np.array([0.0, 0.0]),
np.array([0.0, 0.0])))
np.testing.assert_almost_equal(a - a.T, np.zeros_like(a))
def test_displacement_mirror_symmetry():
kernel = DisplacementKernel(1.0, 0.25)
a = np.array(kernel.call(np.array([1.0, 0.5]),
np.zeros(2), np.array([1.0, 0.0])))
b = np.array(kernel.call(np.array([-1.0, -0.5]),
np.zeros(2), np.array([1.0, 0.0])))
np.testing.assert_almost_equal(a, b)
def test_traction_mirror_symmety():
kernel = TractionKernel(1.0, 0.25)
a = np.array(kernel.call(np.array([1.0, 0.5]),
np.zeros(2), np.array([1.0, 0.0])))
# Only symmetric if we reverse the normal vector too!
b = np.array(kernel.call(np.array([-1.0, -0.5]),
np.zeros(2), np.array([-1.0, 0.0])))
np.testing.assert_almost_equal(a, b)
# def test_reverse_normal():
# kernel = TractionKernel(1.0, 0.25)
# a = np.array(kernel.call(np.array([1.0, 0.5]),
# np.zeros(2), np.array([1.0, 0.0])))
# # Only symmetric if we reverse the normal vector too!
# kernel.reverse_normal = True
# b = np.array(kernel.call(np.array([-1.0, -0.5]),
# np.zeros(2), np.array([1.0, 0.0])))
# np.testing.assert_almost_equal(a, b)
def test_displacement():
kernel = DisplacementKernel(1.0, 0.25)
G = kernel.call(np.array([2.0, 0.0]),
np.array([0, 0.0]),
np.array([0, 1.0]))
np.testing.assert_almost_equal(G[0][0],
(2 * np.log(1 / 2.0) + 1) / (6 * np.pi))
np.testing.assert_almost_equal(G[1][0], 0.0)
np.testing.assert_almost_equal(G[0][1], 0.0)
np.testing.assert_almost_equal(G[1][1],
(2 * np.log(1 / 2.0)) / (6 * np.pi))
def test_traction():
kernel = TractionKernel(1.0, 0.25)
H = kernel.call(np.array([2.0, 0.0]),
np.array([0, 0.0]),
np.array([0, 1.0]))
np.testing.assert_almost_equal(H[0][1],
1 / (6 * np.pi * 2.0))
np.testing.assert_almost_equal(H[0][0], 0.0)
np.testing.assert_almost_equal(H[1][1], 0.0)
np.testing.assert_almost_equal(H[1][0], -H[0][1])
def test_traction_adjoint():
kernel = AdjointTractionKernel(1.0, 0.25)
HT = kernel.call(np.array([2.0, 0.0]),
np.array([0, 1.0]),
np.array([0, 0.0]))
np.testing.assert_almost_equal(HT[0][1],
1 / (6 * np.pi * 2.0))
np.testing.assert_almost_equal(HT[0][0], 0.0)
np.testing.assert_almost_equal(HT[1][1], 0.0)
np.testing.assert_almost_equal(HT[1][0], -HT[0][1])
def test_hypersingular_regularized_set_interior():
kernel = RegularizedHypersingularKernel(1.0, 0.25)
kernel.set_interior_data([2.0, 0.0], [0.0, 1.0])
data = kernel.get_interior_integral_data([0.0, 0.0], [0.0, 0.0])
W = kernel._call(data, 0, 0)
W_exact = 2 * (np.log(2) - 1) / (3 * np.pi)
np.testing.assert_almost_equal(W, W_exact)
def test_hypersingular_regularized_set_interior_defaults():
kernel = RegularizedHypersingularKernel(1.0, 0.25)
data = kernel.get_interior_integral_data([-2.0, 0.0], [0.0, 0.0])
W = kernel._call(data, 0, 0)
W_exact = 2 * (np.log(2) - 1) / (3 * np.pi)
np.testing.assert_almost_equal(W, W_exact)
def test_hypersingular_regularized():
kernel = RegularizedHypersingularKernel(1.0, 0.25)
W = kernel.call(np.array([2.0, 0.0]),
np.array([0, 1.0]),
np.array([0, 0.0]))
W_exact = np.array([[2 * (np.log(2) - 1) / (3 * np.pi), 0],
[0, 2 * np.log(2) / (3 * np.pi)]])
np.testing.assert_almost_equal(W, W_exact)
def test_hypersingular_nonregularized():
kernel = HypersingularKernel(1.0, 0.25)
S = kernel.call(np.array([2.0, 0.0]),
np.array([1, 0.0]),
np.array([0, 1.0]))
S_exact = np.array([[[ 0. , 0.05305165],
[ 0.05305165, 0. ]],
[[ 0.05305165, 0. ],
[ 0. , 0.05305165]]])
S_exact = S_exact[:, 0, :]# + S_exact[:, 1, :]
np.testing.assert_almost_equal(S_exact, S)
def test_hypersingular_vs_regularized():
# By the regularization of the hypersingular integral, these two
# integrations should give the same result.
# I've left
# LOTS OF DETECTIVE WORK!
# in this function, because I had a fun (awful?) time figuring out
# how to get these two integrations to match up... Took three (four?)
# full days...
# The integrations are only equal for an interior basis function. If
# the basis function's support crosses two elements, the point n - 1
# dimensional term in the integration by parts still influences the
# result
k_rh = RegularizedHypersingularKernel(1.0, 0.25)
k_sh = SemiRegularizedHypersingularKernel(1.0, 0.25)
k_h = HypersingularKernel(1.0, 0.25)
K = 30
mesh = circular_mesh(K, 2.0)
bf = basis_from_degree(2)
grad_bf = bf.get_gradient_basis()
qs = QuadStrategy(mesh, 10, 10, 10, 10)
apply_to_elements(mesh, "basis", bf, non_gen = True)
apply_to_elements(mesh, "continuous", True, non_gen = True)
init_dofs(mesh)
el1 = 15
# pp0 = mesh.get_physical_point(el1, 0.5)
# m = mesh.get_normal(el1, 0.5)
a = np.zeros((K, 2, 2))
b = np.zeros((K, 2, 2))
c = np.zeros((K, 2, 2))
# qq = np.zeros((K, 2, 2))
# cr1 = np.zeros(K)
# cr2 = np.zeros(K)
# n2x = np.zeros(K)
# n2y = np.zeros(K)
# grad2x = np.zeros(K)
# grad2y = np.zeros(K)
# k_rh_val = np.zeros((K, 2, 2))
# k_h_val = np.zeros((K, 2, 2))
for el2 in range(K):
if np.abs(el2 - el1) < 2.5:
continue
i = 1
j = 1
o_q, i_q = qs.get_quadrature('logr', mesh.elements[el1], mesh.elements[el2])
o_q = o_q
a[el2, :, :] = double_integral(mesh.elements[el1].mapping.eval, mesh.elements[el2].mapping.eval, k_rh,
grad_bf,
grad_bf,
o_q, i_q, i, j)
b[el2, :, :] = double_integral(mesh.elements[el1].mapping.eval, mesh.elements[el2].mapping.eval, k_h,
bf, bf,
o_q, i_q, i, j)
c[el2, :, :] = double_integral(mesh.elements[el1].mapping.eval, mesh.elements[el2].mapping.eval, k_sh,
grad_bf, bf,
o_q, i_q, i, j)
# qq[el2, :, :] = double_integral(mesh, k_rh, bf, bf,
# o_q, i_q, el1, 1, el2, 1)
# # cr1[el2] = grad_bf.chain_rule(el1, 0.5)[0]
# n2x[el2], n2y[el2] = mesh.get_normal(el2, 0.5)
# grad2x[el2], grad2y[el2] = _get_deriv_point(mesh.basis_fncs.derivs,
# mesh.coefficients,
# el2,
# 0.5)
# cr2[el2] = -n2x[el2] * grad2y[el2] + n2y[el2] * grad2x[el2]
# pp = mesh.get_physical_point(el2, 0.5)
# k_rh_val[el2, :, :] = \
# k_rh.call(pp - pp0, m, np.array([n2x[el2], n2y[el2]]))
# k_h_val[el2, :, :] = \
# k_h.call(pp - pp0, m, np.array([n2x[el2], n2y[el2]]))
# #easiest comparison
# from matplotlib import pyplot as plt
# plt.plot(range(K), a[:, 1, 1], label='ayy')
# plt.plot(range(K), b[:, 1, 1], label='byy')
# plt.plot(range(K), c[:, 1, 1], label='cyy')
# plt.plot(cr1 / 100.0)
# plt.figure()
# plt.plot(range(K), a[:, 1, 1, 0, 0], label='axx')
# plt.plot(range(K), a[:, 0, 1], label='axy')
# plt.plot(range(K), b[:, 1, 1, 0, 0], label='bxx')
# plt.plot(range(K), b[:, 0, 1], label='bxy')
# plt.plot(range(K), qq[:, 0, 0], label='other')
# plt.plot(grad2x * n2x + grad2y ** 2)
# plt.legend()
# plt.figure()
# plt.plot(a[:, 0, 0] / (1.0 * b[:, 0, 0]))
# plt.plot(a[:, 1, 1] / (1.0 * b[:, 1, 1]))
# plt.plot(a[:, 0, 1] / (1.0 * b[:, 0, 1]))
# plt.ylim([-1.5, 1.5])
# # plt.plot(grad2x, label='gradx')
# # plt.plot(grad2y, label='grady')
# # plt.plot(n2y, label='normal')
# plt.figure()
# # plt.plot(k_rh_val[:, 0, 0], label='regularized')
# # plt.plot(k_h_val[:, 0, 0], label='hyp')
# plt.plot(k_h_val[:, 0, 0] / k_rh_val[:, 0, 0], label='divided')
# plt.legend()
# plt.figure()
# plt.plot(n2y, label='normal')
# plt.plot(cr2, label='cr')
# plt.legend()
# plt.show()
np.testing.assert_almost_equal(a, b, 2)
np.testing.assert_almost_equal(a, c, 2)
np.testing.assert_almost_equal(b, c, 2)
def test_hypersingular_vs_regularized_across_elements():
# The regularization is only valid for a continuous basis, so the
# integrations will not be equal unless I account for both elements.
k_rh = RegularizedHypersingularKernel(1.0, 0.25)
k_sh = SemiRegularizedHypersingularKernel(1.0, 0.25)
k_h = HypersingularKernel(1.0, 0.25)
K = 30
mesh = circular_mesh(K, 2.0)
bf = basis_from_degree(2)
grad_bf = bf.get_gradient_basis()
qs = QuadStrategy(mesh, 10, 10, 10, 10)
apply_to_elements(mesh, "basis", bf, non_gen = True)
apply_to_elements(mesh, "continuous", True, non_gen = True)
init_dofs(mesh)
el1a = 15
el1b = 14
el2a = 25
el2b = 26
o_q, i_q = qs.get_quadrature('logr', mesh.elements[el1a], mesh.elements[el2a])
o_q = o_q
# Four integrals for this matrix term. Two choices of source element
# and two choices of solution element.
a1 = double_integral(mesh.elements[el1a].mapping.eval,
mesh.elements[el2a].mapping.eval, k_rh,
grad_bf, grad_bf,
o_q, i_q, 0, 2)
a2 = double_integral(mesh.elements[el1a].mapping.eval,
mesh.elements[el2b].mapping.eval, k_rh,
grad_bf, grad_bf,
o_q, i_q, 0, 0)
a3 = double_integral(mesh.elements[el1b].mapping.eval,
mesh.elements[el2a].mapping.eval, k_rh,
grad_bf, grad_bf, o_q, i_q, 2, 2)
a4 = double_integral(mesh.elements[el1b].mapping.eval
, mesh.elements[el2b].mapping.eval, k_rh,
grad_bf, grad_bf, o_q, i_q, 2, 0)
b1 = double_integral(mesh.elements[el1a].mapping.eval
, mesh.elements[el2a].mapping.eval, k_h,
bf, bf, o_q, i_q, 0, 2)
b2 = double_integral(mesh.elements[el1a].mapping.eval
, mesh.elements[el2b].mapping.eval, k_h,
bf, bf, o_q, i_q, 0, 0)
b3 = double_integral(mesh.elements[el1b].mapping.eval
, mesh.elements[el2a].mapping.eval, k_h,
bf, bf, o_q, i_q, 2, 2)
b4 = double_integral(mesh.elements[el1b].mapping.eval
, mesh.elements[el2b].mapping.eval, k_h,
bf, bf, o_q, i_q, 2, 0)
c1 = double_integral(mesh.elements[el1a].mapping.eval
, mesh.elements[el2a].mapping.eval, k_sh,
grad_bf, bf, o_q, i_q, 0, 2)
c2 = double_integral(mesh.elements[el1a].mapping.eval
, mesh.elements[el2b].mapping.eval, k_sh,
grad_bf, bf, o_q, i_q, 0, 0)
c3 = double_integral(mesh.elements[el1b].mapping.eval
, mesh.elements[el2a].mapping.eval, k_sh,
grad_bf, bf, o_q, i_q, 2, 2)
c4 = double_integral(mesh.elements[el1b].mapping.eval
, mesh.elements[el2b].mapping.eval, k_sh,
grad_bf, bf, o_q, i_q, 2, 0)
a = np.array(a1) + np.array(a2) + np.array(a3) + np.array(a4)
b = np.array(b1) + np.array(b2) + np.array(b3) + np.array(b4)
c = np.array(c1) + np.array(c2) + np.array(c3) + np.array(c4)
np.testing.assert_almost_equal(a, b)
np.testing.assert_almost_equal(b, c)
np.testing.assert_almost_equal(a, c)
if __name__ == "__main__":
# test_traction()
test_hypersingular_regularized_set_interior_defaults()
| mit |
jorge2703/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
konder/tushare | tushare/datayes/fundamental.py | 16 | 18026 | # -*- coding:utf-8 -*-
"""
DataYes (Tonglian Data) fundamentals interface
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Fundamental():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def FdmtBS(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the consolidated balance sheet template defined by the 2007 new accounting standards; collects balance sheet data for every accounting period from the periodic reports of Shanghai- and Shenzhen-listed companies since 2007.
2. Only consolidated statement data is collected, including period-end and period-beginning figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBS%(reportType, secID, ticker,
beginDate, endDate, publishDateBegin,
publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the banking-industry balance sheet template defined by the 2007 new accounting standards; collects all balance sheets disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed banks).
2. Only consolidated statement data is collected, including period-end and period-beginning figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the securities-industry balance sheet template defined by the 2007 new accounting standards; collects all balance sheets disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed securities firms).
2. Only consolidated statement data is collected, including period-end and period-beginning figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin,
publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the general industrial and commercial balance sheet template defined by the 2007 new accounting standards; collects all balance sheets disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly general industrial and commercial listed companies).
2. Only consolidated statement data is collected, including period-end and period-beginning figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the insurance-industry balance sheet template defined by the 2007 new accounting standards; collects all balance sheets disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurers).
2. Only consolidated statement data is collected, including period-end and period-beginning figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCF(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the consolidated cash flow statement template defined by the 2007 new accounting standards; collects cash flow statement data for every accounting period from the periodic reports of Shanghai- and Shenzhen-listed companies since 2007.
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCF%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the banking-industry cash flow statement template defined by the 2007 new accounting standards; collects all cash flow statements disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed banks).
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the securities-industry cash flow statement template defined by the 2007 new accounting standards; collects all cash flow statements disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed securities firms).
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the general industrial and commercial cash flow statement template defined by the 2007 new accounting standards; collects all cash flow statements disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly general industrial and commercial listed companies).
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the insurance-industry cash flow statement template defined by the 2007 new accounting standards; collects all cash flow statements disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurers).
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtIS(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the consolidated income statement template defined by the 2007 new accounting standards; collects income statement data for every accounting period from the periodic reports of Shanghai- and Shenzhen-listed companies since 2007.
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTIS%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the banking-industry income statement template defined by the 2007 new accounting standards; collects all income statements disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed banks).
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the securities-industry income statement template defined by the 2007 new accounting standards; collects all income statements disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed securities firms).
2. Only consolidated statement data is collected, including current-period and prior-period figures.
3. Corrections and restatements of published financial statements are also collected and displayed.
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the general industrial and commercial income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai and Shenzhen listed companies since 2007 (mainly general industrial and commercial listed companies);
2. Only consolidated statement data are collected, covering both the current period and the prior period;
3. Corrections and adjustments that listed companies make to previously published financial statements are also collected and displayed;
4. Amounts in this table are in CNY (RMB yuan);
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the insurance-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai and Shenzhen listed companies since 2007 (mainly listed insurers);
2. Only consolidated statement data are collected, covering both the current period and the prior period;
3. Corrections and adjustments that listed companies make to previously published financial statements are also collected and displayed;
4. Amounts in this table are in CNY (RMB yuan);
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtEe(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
Retrieves key financial indicators and other data from the earnings flash reports (performance express reports) disclosed by listed companies for fiscal year 2007 and later,
including figures for the current period, the same period of the previous year, and the change of the current period relative to the beginning of the period.
Data are updated each quarter when the stock exchanges disclose the related announcements, and also when a company publishes such information at its IPO. Exchange-disclosed data are updated before 9:00 each day; announcements released at midday are updated before 12:45 each day.
"""
code, result = self.client.getData(vs.FDMTEE%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtEf(self, reportType='', secID='', ticker='', beginDate='', endDate='',
forecastType='', publishDateBegin='', publishDateEnd='', field=''):
"""
1. Retrieves the forecast revenue, net profit, net profit attributable to the parent company, basic EPS and their expected change ranges for the next reporting period, as disclosed in company announcements for fiscal year 2007 and later.
2. A company's forecast for an operating-result item is generally given as an upper and a lower bound; the upper bound is the disclosed value with the larger absolute value, and the lower bound the disclosed value with the smaller absolute value.
3. A positive value means the company expects a profit; a negative value means an expected loss. If the upper and lower bounds have different signs, the profit/loss outcome is still uncertain.
4. The forecast type follows the type stated in the announcement text; if no type is stated in the announcement, it is inferred from the data.
5. Data are updated each quarter when the stock exchanges disclose the related announcements, and also when a company publishes such information at its IPO. Exchange-disclosed data are updated before 9:00 each day; announcements released at midday are updated before 12:45 each day.
"""
code, result = self.client.getData(vs.FDMTEF%(reportType, secID, ticker,
beginDate, endDate, forecastType,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
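# Illustrative usage sketch (added; not part of the original client). An
# earnings-forecast query follows the same call pattern with the extra
# forecastType argument; the ticker and dates below are hypothetical.
#
#   df = dv.FdmtEf(ticker='600848', publishDateBegin='20150101',
#                  publishDateEnd='20151231', forecastType='')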
def FdmtISLately(self, field=''):
"""
1. Retrieves each listed company's most recent income statement data, based on the consolidated income statement template defined under the 2007 new accounting standards; only consolidated statement data are collected;
2. Corrections and adjustments that listed companies make to previously published financial statements are also collected and displayed;
3. Amounts in this table are in CNY (RMB yuan);
4. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISLATELY%(field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
bzcheeseman/phys211 | Alex/Relativistic Electron Dispersion/plotter.py | 1 | 3556 | from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt
a = 1.42372210086
aerr = 0.00295712984228
b = 0.0770992785753
berr = 0.00969212354148
cedges = np.array([338,234,749,279,614,186,634,773])
cedgerr = np.array([3,3,3,3,3,4,3,4])
cpeaks = np.array([469,353,900,405,758,296,782,922])
cperr = np.array([2] * len(cpeaks))
def c2e(cs, errs): #cs is an array of channels
es = a * cs + b
errs = (aerr/a + errs/cs) * cs + berr
return es, errs
T, Terrs = c2e(cedges, cedgerr)
peaks, peakerr = c2e(cpeaks, cperr) #peak = initial photon Energy
pc = np.array(2*peaks - T)
pcerr = np.array(2*peakerr + Terrs)
ys = np.array(pc*pc / (2 * T))
yserr = np.array(np.sqrt((2*pc/(2*T))*(2*pc/(2*T))*pcerr*pcerr + (pc*pc*2/(T*T))*(pc*pc*2/(T*T))*Terrs*Terrs))
print ys, T, yserr
### FIT AND PLOT (PC)^2/2T (ys) against T
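# Interpretive note (added): for a relativistic electron, E^2 = (pc)^2 + (m c^2)^2
# with kinetic energy T = E - m c^2, which rearranges to
#   (pc)^2 / (2T) = T/2 + m c^2.
# So plotting (pc)^2/(2T) against T should give a straight line with slope close
# to 0.5 and intercept close to the electron rest energy (~511 keV), which is
# what the linear fit below tests.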
def poly(p, x):
return p[0]*(x) + p[1]
def residual(p, x, y, err):
return (poly(p, x) - y) / err
p0 = np.array([1.,1.])
pf, cov, info, mesg, success = optimize.leastsq(residual, p0, args=(T, ys, yserr), full_output=1, maxfev=1000)
print pf
chisq = sum(info["fvec"]*info["fvec"])
print chisq
dof = len(ys)-len(pf)
pferr = [np.sqrt(cov[i,i]) for i in range(len(pf))]
fig = plt.figure()
ax = plt.axes()
ax.errorbar(T, ys, xerr=0., yerr=yserr, fmt='k.', label = 'Data')
xs = np.linspace(T.min(), T.max(), 5000)
ax.plot(xs, poly(pf, xs), 'r-', label = 'fit')
ax.set_title('Energy - Momentum Relation')
ax.set_xlabel('T')
ax.set_ylabel('$(pc)^2/2T$')
ax.legend(loc=(0.77,0.65))
textfit = '$f(T) = A T + B$ \n' \
'$A = %.2f \pm %.2f$ \n' \
'$B = %.1f \pm %.1f$ keV \n' \
'$\chi^2= %.2f$ \n' \
'$N = %i$ (dof) \n' \
'$\chi^2/N = % .2f$' \
% (pf[0], pferr[0], pf[1], pferr[1], chisq, dof,
chisq/dof)
ax.text(0.1, .9, textfit, transform=ax.transAxes, fontsize=12,
verticalalignment='top')
plt.savefig('plots/energy_momentum.png')
plt.show()
restmass = 2*peaks*(peaks - T)/T
rmerr = np.sqrt((4*peaks/T)*(4*peaks/T)*(peakerr)*(peakerr) + (2*peaks*peaks/(T*T))*(2*peaks*peaks/(T*T))*Terrs*Terrs)
rmerr += 2*peakerr
meanmass = np.mean(restmass)
print meanmass
massx = [meanmass] * len(restmass)
massx1 = [meanmass + 7] * 5000
massx2 = [meanmass - 7] * 5000
masserr = np.std(restmass)
print masserr
fig2 = plt.figure()
ax2 = plt.axes()
ax2.errorbar(T, restmass, xerr=0., yerr=rmerr, fmt='k.', label = 'Data')
ex = np.linspace(np.min(T), np.max(T), 5000)
ax2.plot(T, massx, 'r-', label = 'mean = 512.1keV')
ax2.plot(ex, massx1, 'b-.')
ax2.plot(ex, massx2, 'b-.')
ax2.set_title('Rest Mass Calculation')
ax2.set_xlabel('Compton Edge (keV)')
ax2.set_ylabel('Rest Mass')
ax2.legend(loc=(0.6,0.85))
plt.savefig('plots/restmass_T.png')
plt.show()
### BETA STUFF ###
beta = T * (2*peaks - T)/(T*T - 2*peaks*T + 2*peaks*peaks)
betaerr = (4*peaks*(peaks-T)*np.sqrt((peaks*Terrs)*(peaks*Terrs)+(T*peakerr)*(T*peakerr)))
betaerr /= (T*T - 2*T*peaks + 2*peaks*peaks)*(T*T - 2*T*peaks + 2*peaks*peaks)
fig3 = plt.figure()
ax3 = plt.axes()
ax3.errorbar(beta, pc, xerr=betaerr, yerr=pcerr, fmt='k.', label = 'Data')
ax3.set_title('Momentum vs Beta')
ax3.set_xlabel('Beta (v/c)')
ax3.set_ylabel('Momentum (pc)')
plt.savefig('plots/momentum_beta.png')
plt.show()
fig4 = plt.figure()
ax4 = plt.axes()
ax4.errorbar(beta, T, xerr=betaerr, yerr=Terrs, fmt='k.', label = 'Data')
ax4.set_title('Kinetic Energy vs Beta')
ax4.set_xlabel('Beta (v/c)')
ax4.set_ylabel('Kinetic Energy (T)')
plt.savefig('plots/T_beta.png')
plt.show()
| lgpl-3.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/sparse/scipy_sparse.py | 18 | 5516 | """
Interaction with scipy.sparse matrices.
Currently only includes SparseSeries.to_coo helpers.
"""
from pandas.core.index import MultiIndex, Index
from pandas.core.series import Series
from pandas.compat import OrderedDict, lmap
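# Usage sketch (added; illustrative only, assumes a pandas version that still
# ships SparseSeries). The helpers below are normally reached through
# SparseSeries.to_coo / SparseSeries.from_coo rather than called directly:
#
#   import numpy as np
#   import pandas as pd
#   s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
#   s.index = pd.MultiIndex.from_tuples(
#       [(1, 2, 'a', 0), (1, 2, 'a', 1), (1, 1, 'b', 0),
#        (1, 1, 'b', 1), (2, 1, 'b', 0), (2, 1, 'b', 1)])
#   ss = s.to_sparse()
#   A, rows, columns = ss.to_coo(row_levels=(0, 1), column_levels=(2, 3),
#                                sort_labels=True)
#   # A is a scipy.sparse.coo_matrix; rows/columns hold the tuple labels.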
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError(
'Is not a partition because intersection is not null.')
if set.union(*parts) != whole:
raise ValueError('Is not a partition because union is not the whole.')
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
""" For arbitrary (MultiIndexed) SparseSeries return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor. """
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the SparseSeries: get the labels and data for non-null entries
values = ss._data.internal_values()._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels)
for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
# TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
""" Return OrderedDict of unique labels to number.
Optionally sort by label.
"""
labels = Index(lmap(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(list(labels))
d = OrderedDict((k, i) for i, k in enumerate(labels))
return (d)
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
def robust_get_level_values(i):
# if index has labels (that are not None) use those,
# else use the level location
try:
return index.get_level_values(index.names[i])
except KeyError:
return index.get_level_values(i)
ilabels = list(zip(*[robust_get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels,
sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
labels_to_i.name = 'value'
return (labels_to_i)
labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels,
sort_labels=sort_labels)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
""" Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
raise ValueError('to_coo requires MultiIndex with nlevels >= 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns
def _coo_to_sparse_series(A, dense_index=False):
""" Convert a scipy.sparse.coo_matrix to a SparseSeries.
Use the defaults given in the SparseSeries constructor.
"""
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
s = s.sort_index()
s = s.to_sparse() # TODO: specify kind?
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex_axis(ind)
return s
| gpl-2.0 |
exxeleron/qPython | doc/source/conf.py | 1 | 8654 | # -*- coding: utf-8 -*-
#
# qPython documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 09 07:11:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
class Mock(MagicMock):
__all__ = []
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['argparse', 'numpy', 'pandas']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# workaround for building docs without numpy
import numpy
numpy.frombuffer = lambda x, dtype: [None]
numpy.ndarray = Mock
# end-of-workaround
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'qPython'
copyright = u'2014-2016, DEVnet'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from qpython import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'qPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'qPython.tex', u'qPython Documentation',
u'DEVnet', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'qpython', u'qPython Documentation',
[u'DEVnet'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'qPython', u'qPython Documentation',
u'DEVnet', 'qPython', 'Interprocess communication between Python and kdb+',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = 'bysource' | apache-2.0 |
roxyboy/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
h2educ/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 68 | 23597 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
rng = rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
| bsd-3-clause |
fredhusser/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
MMaus/mutils | libshai/phaser.py | 1 | 17012 | from numpy import *
from util import *
from scipy import signal
import warnings
from exceptions import Warning
"""
The phaser module provides an implementation of the phase estimation algorithm
of "Estimating the phase of synchronized oscillators";
S. Revzen & J. M. Guckenheimer; Phys. Rev. E; 2008, v. 78, pp. 051907
doi: 10.1103/PhysRevE.78.051907
Phaser takes in multidimensional data from multiple experiments and fits the
parameters of the phase estimator, which may then be used on new data or the
training data. The output of Phaser is a phase estimate for each time sample
in the data. This phase estimate has several desirable properties, such as:
(1) d/dt Phase is approximately constant
(2) Phase estimates are robust to measurement errors in any one variable
(3) Phase estimates are robust to systematic changes in the measurement error
The top-level class of this module is Phaser.
An example is found in test_sincos(); it requires matplotlib
"""
class ZScore( object ):
"""
Class for finding z scores of given measurements with given or computed
covarance matrix.
This class implements equation (7) of [Revzen08]
Properties:
y0 -- Dx1 -- measurement mean
M -- DxD -- measurement covariance matrix
S -- DxD -- scoring matrix
"""
def __init__( self, y = None, M = None ):
"""Computes the mean and scoring matrix of measurements
INPUT:
y -- DxN -- N measurements of a time series in D dimensions
M -- DxD (optional) -- measurement error covariance for y
-- If M is missing, it is assumed to be diagonal with variances
-- given by 1/2 variance of the second order differences of y
"""
# if M given --> use fromCovAndMean
# elif we got y --> use fromData
# else --> create empty object with None in members
if M is not None:
self.fromCovAndMean( mean(y, 1), M)
elif y is not None:
self.fromData( y )
else:
self.y0 = None
self.M = None
self.S = None
def fromCovAndMean( self, y0, M ):
"""
Compute scoring matrix based on square root of M through svd
INPUT:
y0 -- Dx1 -- mean of data
M -- DxD -- measurement error covariance of data
"""
self.y0 = y0
self.M = M
(D, V) = linalg.eig( M )
self.S = dot( V.transpose(), diag( 1/sqrt( D ) ) )
def fromData( self, y ):
"""
Compute scoring matrix based on estimated covariance matrix of y
Estimated covariance matrix is given by 1/2 variance of the second order
differences of y
INPUT:
y -- DxN -- N measurements of a time series in D dimensions
"""
self.y0 = mean( y, 1 )
self.M = diag( std( diff( y, n=2, axis=1 ), axis=1 ) )
self.S = diag( 1/sqrt( diag( self.M ) ) )
def __call__( self, y ):
"""
Callable wrapper for the class
Calls self.zScore internally
"""
return self.zScore( y )
def zScore( self, y ):
"""Computes the z score of measurement y using stored mean and scoring
matrix
INPUT:
y -- DxN -- N measurements of a time series in D dimensions
OUTPUT:
zscores for y -- DxN
"""
return dot( self.S, y - self.y0.reshape( len( self.y0 ), 1 ) )
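# Example (added; illustrative): z-scoring a D x N time series with the
# covariance estimated from second-order differences; `y` here stands for a
# hypothetical numpy array of measurements.
#
#   zs = ZScore( y )      # fit mean and scoring matrix from the data
#   z = zs( y )           # equivalent to zs.zScore( y )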
def _default_psf(x):
"""Default Poincare section function
by rights, this should be inside the Phaser class, but pickle
would barf on Phaser objects if they contained functions that
aren't defined in the module top-level.
"""
return signal.lfilter(
array([0.02008336556421, 0.04016673112842,0.02008336556421] ),
array([1.00000000000000,-1.56101807580072,0.64135153805756] ),
x[0,:] )
class PhaserWarning( Warning ):
"""Warning class used for various data-quality warnings in Phaser"""
pass
class Phaser( object ):
"""
Concrete class implementing a Phaser phase estimator
Instance attributes:
sc -- ZScore object for converting y to z-scores
P_k -- list of D FourierSeries objects -- series correction for correcting proto-phases
prj -- D x 1 complex -- projector on combined proto-phase
P -- FourierSeries object -- series correction for combined phase
psf -- callable -- callback to psecfunc (the Poincare section function)
"""
def __init__( self, y = None, C = None, ordP = None, psecfunc = None ):
"""
Initializing/training a phaser object
INPUT:
y -- DxN or [ DxN_1, DxN_2, DxN_3, ... ] -- Measurements used for training
C -- DxD (optional) -- Covariance matrix of measurements
ordP -- 1x1 (optional) -- Orders of series to use in series correction
psecfunc -- 1x1 (optional) -- Poincare section function
"""
# if psecfunc given -> use given
if psecfunc is not None:
self.psf = psecfunc
else:
self.psf = _default_psf
# if y given -> calls self.phaserTrain
if y is not None:
self.phaserTrain( y, C, ordP )
def __call__( self, dat ):
"""
Callable wrapper for the class. Calls phaserEval internally
"""
return self.phaserEval( dat )
def phaserEval( self, dat ):
"""
Computes the phase of testing data
INPUT:
dat -- DxN -- Testing data whose phase is to be determined
OUTPUT:
Returns the complex phase of input data
"""
# compute z score
z = self.sc.zScore( dat )
# compute Poincare section
p0 = self.psf( dat )
# compute protophase using Hilbert transform
zeta = self.mangle * hilbert( z )
z0, ido0 = Phaser.sliceN( zeta, p0 )
# Compute phase offsets for proto-phases
ofs = exp(-1j * angle(mean(z0, axis = 1)).T)
# series correction for each dimension using self.P_k
th = Phaser.angleUp( zeta * ofs[:,newaxis] )
# apply the per-dimension series correction to each protophase angle
p = 1j * zeros( th.shape )
for k in range( th.shape[0] ):
p[k,:] = self.P_k[k].val( th[k,:] ).T + th[k,:]
rho = mean( abs( zeta ), 1 ).reshape(( zeta.shape[0], 1 ))
# compute phase projected onto first principal components using self.prj
ph = Phaser.angleUp( dot( self.prj.T, vstack( [cos( p ) * rho, sin( p ) * rho] ) ))
# return series correction of combined phase using self.P
phi = real( ph + self.P.val( ph ).T )
pOfs2 = (p0[ido0+1] * exp(1j * phi.T[ido0+1]) - p0[ido0] * exp(1j * phi.T[ido0] )) / (p0[ido0+1] - p0[ido0])
return phi - angle(sum(pOfs2))
def phaserTrain( self, y, C = None, ordP = None ):
"""
Trains the phaser object with given data.
INPUT:
y -- DxN or [ DxN_1, DxN_2, DxN_3, ... ] -- Measurements used for training
C -- DxD (optional) -- Covariance matrix of measurements
"""
# if given one sample -> treat it as an ensemble with one element
if y.__class__ is ndarray:
y = [y]
# Copy the list container
y = [yi for yi in y]
# check dimension agreement in ensemble
if len( set( [ ele.shape[0] for ele in y ] ) ) is not 1:
raise( Exception( 'newPhaser:dims','All datasets in the ensemble must have the same dimension' ) )
D = y[0].shape[0]
# train ZScore object based on the entire ensemble
self.sc = ZScore( hstack( y ), C )
# initializing proto phase variable
zetas = []
cycl = zeros( len( y ))
svm = 1j*zeros( (D, len( y )) )
svv = zeros( (D, len( y )) )
# compute protophases for each sample in the ensemble
for k in range( len( y ) ):
# hilbert transform the sample's z score
zetas.append( hilbert( self.sc.zScore( y[k] ) ) )
# trim beginning and end cycles, and check for cycle freq and quantity
cycl[k], zetas[k], y[k] = Phaser.trimCycle( zetas[k], y[k] )
# Computing the Poincare section
sk = self.psf( y[k] )
(sv, idx) = Phaser.sliceN( zetas[k], sk )
if idx.shape[-1] == 0:
raise Exception( 'newPhaser:emptySection', 'Poincare section is empty -- bailing out' )
svm[:,k] = mean( sv, 1 )
svv[:,k] = var( sv, 1 ) * sv.shape[1] / (sv.shape[1] - 1)
# computing phase offset based on psecfunc
self.mangle, ofs = Phaser.computeOffset( svm, svv )
# correcting phase offset for proto phase and compute weights
wgt = zeros( len( y ) )
rho_i = zeros(( len( y ), y[0].shape[0] ))
for k in range( len( y ) ):
zetas[k] = self.mangle * exp( -1j * ofs[k] ) * zetas[k]
wgt[k] = zetas[k].shape[0]
rho_i[k,:] = mean( abs( zetas[k] ), 1 )
# compute normalized weight for each dimension using weights from all samples
wgt = wgt.reshape(( 1, len( y )))
rho = ( dot( wgt, rho_i ) / sum( wgt ) ).T
# if ordP is None -> use high enough order to reach Nyquist/2
if ordP is None:
ordP = ceil( max( cycl ) / 4 )
# correct protophase using seriesCorrection
self.P_k = Phaser.seriesCorrection( zetas, ordP )
# loop over all samples of the ensemble
q = []
for k in range( len( zetas ) ):
# compute protophase angle
th = Phaser.angleUp( zetas[k] )
phi_k = 1j * ones( th.shape )
# loop over all dimensions
for ki in range( th.shape[0] ):
# compute corrected phase based on protophase
phi_k[ki,:] = self.P_k[ki].val( th[ki,:] ).T + th[ki,:]
# compute vectorized phase
q.append( vstack( [cos( phi_k ) * rho, sin( phi_k ) * rho] ) )
# project phase vectors using first two principal components
W = hstack( q[:] )
W = W - mean( W, 1 )[:,newaxis]
pc = svd( W, False )[0]
self.prj = reshape( pc[:,0] + 1j * pc[:,1], ( pc.shape[0], 1 ) )
# Series correction of combined phase
qz = []
for k in range( len( q ) ):
qz.append( dot( self.prj.T, q[k] ) )
# store object members for the phase estimator
self.P = Phaser.seriesCorrection( qz, ordP )[0]
def computeOffset( svm, svv ):
"""
"""
# convert variances into weights
svv = svv / sum( svv, 1 ).reshape( svv.shape[0], 1 )
# compute variance weighted average of phasors on cross section to give the phase offset of each protophase
mangle = sum( svm * svv, 1)
if any( abs( mangle ) < .1 ):
b = find( abs( mangle ) < .1 )
raise Exception( 'computeOffset:badmeasureOfs', '%d measurement(s), including %d, are too noisy on Poincare section' % ( len( b ), b[0] ) )
# compute phase offsets for trials
mangle = conj( mangle ) / abs( mangle )
mangle = mangle.reshape(( len( mangle ), 1))
svm = mangle * svm
ofs = mean( svm, 0 )
if any( abs( ofs ) < .1 ):
b = find( abs( ofs ) < .1 )
raise Exception( 'computeOffset:badTrialOfs', '%d trial(s), including %d, are too noisy on Poincare section' % ( len( b ), b[0] ) )
return mangle, angle( ofs )
computeOffset = staticmethod( computeOffset )
def sliceN( x, s, h = None ):
"""
Slices a D-dimensional time series at a surface
INPUT:
x -- DxN -- data with colums being points in the time series
s -- N, array -- values of function that is zero and increasing on surface
h -- 1x1 (optional) -- threshold for transitions, transitions>h are ignored
OUTPUT:
slc -- DxM -- positions at estimated value of s==0
idx -- M -- indices into columns of x indicating the last point before crossing the surface
"""
# checking for dimension agreement
if x.shape[1] != s.shape[0]:
raise Exception( 'sliceN:mismatch', 'Slice series must have matching columns with data' )
idx = find(( s[1:] > 0 ) & ( s[0:-1] <= 0 ))
idx = idx[idx < x.shape[1]]
if h is not None:
idx = idx[ ( abs( s[idx] ) < h ) & ( abs( s[idx+1] ) < h ) ]
N = x.shape[0]
if len( idx ) == 0:
return zeros(( N, 0 )), idx
wBfr = abs( s[idx] )
wBfr = wBfr.reshape((1, len( wBfr )))
wAfr = abs( s[idx+1] )
wAfr = wAfr.reshape((1, len( wAfr )))
slc = ( x[:,idx]*wAfr + x[:,idx+1]*wBfr ) / ( wBfr + wAfr )
return slc, idx
sliceN = staticmethod( sliceN )
def angleUp( zeta ):
"""
Convert complex data to increasing phase angles
INPUT:
zeta -- DxN complex
OUTPUT:
returns DxN phase angle of zeta
"""
# unwind angles
th = unwrap( angle ( zeta ) )
# reverse decreasing sequences
bad = th[:,0] > th[:,-1]
if any( bad ):
th[bad,:] = -th[bad,:]
return th
angleUp = staticmethod( angleUp )
def trimCycle( zeta, y ):
"""
"""
# compute wrapped angle for hilbert transform
ph = Phaser.angleUp( zeta )
# estimate nCyc in each dimension
nCyc = abs( ph[:,-1] - ph[:,0] ) / 2 / pi
cycl = int( ceil( zeta.shape[1] / max( nCyc ) ) )
# if nCyc < 7 -> warning
# elif range(nCyc) > 2 -> warning
# else truncate beginning and ending cycles
if any( nCyc < 7 ):
warnings.warn( "tooShort n=%d" % nCyc.min(), PhaserWarning )
elif max( nCyc ) - min( nCyc ) > 2:
warnings.warn( "nCycMismatch min=%d max=%d" % (nCyc.min(),nCyc.max()) , PhaserWarning )
else:
zeta = zeta[:,cycl:-cycl]
y = y[:,cycl:-cycl]
return cycl, zeta, y
trimCycle = staticmethod( trimCycle )
def seriesCorrection( zetas, ordP ):
"""
Fourier series correction for data zetas up to order ordP
INPUT:
zetas -- [DxN_1, DxN_2, ...] -- list of D dimensional data to be corrected using Fourier series
ordP -- 1x1 -- Number of Fourier modes to be used
OUTPUT:
Returns a list of FourierSeries objects fitted to zetas
"""
# initialize proto phase series 2D list
proto = []
# loop over all samples of the ensemble
wgt = zeros( len( zetas ) )
for k in range( len( zetas ) ):
proto.append([])
# compute protophase angle (theta)
zeta = zetas[k]
N = zeta.shape[1]
theta = Phaser.angleUp( zeta )
# generate time variable
t = linspace( 0, 1, N )
# compute d_theta
dTheta = diff( theta, 1 )
# compute d_t
dt = diff( t )
# mid-sampling of protophase angle
th = ( theta[:,1:] + theta[:,:-1] ) / 2.0
# loop over all dimensions
for ki in range( zeta.shape[0] ):
# evaluate Fourier series for (d_theta/d_t)(theta)
# normalize Fourier coefficients to a mean of 1
fdThdt = FourierSeries().fit( ordP * 2, th[ki,:].reshape(( 1, th.shape[1])), dTheta[ki,:].reshape(( 1, dTheta.shape[1])) / dt )
fdThdt.coef = fdThdt.coef / fdThdt.m
fdThdt.m = array([1])
# evaluate Fourier series for (d_t/d_theta)(theta) based on Fourier
# approx of (d_theta/d_t)
# normalize Fourier coefficients to a mean of 1
fdtdTh = FourierSeries().fit( ordP, th[ki,:].reshape(( 1, th.shape[1])), 1 / fdThdt.val( th[ki,:].reshape(( 1, th.shape[1] )) ).T )
fdtdTh.coef = fdtdTh.coef / fdtdTh.m
fdtdTh.m = array([1])
# evaluate corrected phase phi(theta) series as symbolic integration of
# (d_t/d_theta), this is off by a constant
proto[k].append(fdtdTh.integrate())
# compute sample weight based on sample length
wgt[k] = zeta.shape[0]
wgt = wgt / sum( wgt )
# return phase estimation as weighted average of phase estimation of all samples
proto_k = []
for ki in range( zetas[0].shape[0] ):
proto_k.append( FourierSeries.bigSum( [p[ki] for p in proto], wgt ))
return proto_k
seriesCorrection = staticmethod( seriesCorrection )
def test_sincos():
"""
Simple test/demo of Phaser, recovering a sine and cosine
Demo courtesy of Jimmy Sastra, U. Penn 2011
"""
from numpy import sin,cos,pi,array,linspace,cumsum,asarray,dot,ones,sqrt,newaxis
from pylab import plot, legend, axis, show, randint, randn, std,lstsq
# create separate trials and store times and data
dats=[]
t0 = []
period = 55 # i
phaseNoise = 0.5/sqrt(period)
snr = 20
N = 10
print N,"trials with:"
print "\tperiod %.2g"%period,"(samples)\n\tSNR %.2g"%snr,"\n\tphase noise %.2g"%phaseNoise,"(radian/cycle)"
print "\tlength = [",
for li in xrange(N):
l = randint(400,2000) # length of trial
dt = pi*2.0/period + randn(l)*0.07 # create noisy time steps
t = cumsum(dt)+randn()*2*pi # starting phase is random
raw = asarray([sin(t),cos(t)]) # signal
raw = raw + randn(*raw.shape)/snr # SNR=20 noise
t0.append(t)
dats.append( raw - raw.mean(axis=1)[:,newaxis] )
print l,
print "]"
# use points where sin=cos as poincare section
phr = Phaser( dats, psecfunc = lambda x : dot([1,-1],x) )
phi = [ phr.phaserEval( d ) for d in dats ] # extract phase
reg = array([linspace(0,1,t0[0].size),ones(t0[0].size)]).T
tt = dot( reg, lstsq(reg,t0[0])[0] )
plot(((tt-pi/4) % (2*pi))/pi-1, dats[0].T,'x')
plot( (phi[0].T % (2*pi))/pi-1, dats[0].T,'.')#plot data versus phase
legend(['sin(t)','cos(t)','sin(phi)','cos(phi)'])
axis([-1,1,-1.2,1.2])
show()
if __name__=="__main__":
test_sincos()
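# Illustrative usage sketch (comments only, not part of the original module).
# Assuming `dats` is a list of D-by-N arrays of zero-mean oscillatory data, the
# pattern demonstrated by test_sincos() above is:
#
#   phr = Phaser( dats, psecfunc = lambda x : dot([1,-1],x) )  # fit the phase estimator
#   phi = [ phr.phaserEval( d ) for d in dats ]                # per-trial phase estimates (radians)
#
# psecfunc defines the Poincare section used internally by sliceN().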
| gpl-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/fontconfig_pattern.py | 8 | 6538 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import re, sys
from pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser(object):
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException as e:
raise ValueError(
"Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
self._parser.resetCache()
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
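# Illustrative usage sketch (comments only, not part of the original module):
#
#   props = parse_fontconfig_pattern('serif:bold:italic')
#   # props -> {'family': ['serif'], 'weight': ['bold'], 'slant': ['italic']}
#
# Bare property names are mapped through the _constants table above ('bold' ->
# weight, 'italic' -> slant); generate_fontconfig_pattern() performs the reverse
# conversion for a FontProperties-like object.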
| mit |
majetideepak/arrow | python/pyarrow/parquet.py | 1 | 51904 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from collections import defaultdict
from concurrent import futures
from functools import partial
from six.moves.urllib.parse import urlparse
import json
import numpy as np
import os
import re
import six
import warnings
import pyarrow as pa
import pyarrow.lib as lib
import pyarrow._parquet as _parquet
from pyarrow._parquet import (ParquetReader, Statistics, # noqa
FileMetaData, RowGroupMetaData,
ColumnChunkMetaData,
ParquetSchema, ColumnSchema)
from pyarrow.compat import guid
from pyarrow.filesystem import (LocalFileSystem, _ensure_filesystem,
resolve_filesystem_and_path)
from pyarrow.util import _is_path_like, _stringify_path
_URI_STRIP_SCHEMES = ('hdfs',)
def _parse_uri(path):
path = _stringify_path(path)
parsed_uri = urlparse(path)
if parsed_uri.scheme in _URI_STRIP_SCHEMES:
return parsed_uri.path
else:
# ARROW-4073: On Windows returning the path with the scheme
# stripped removes the drive letter, if any
return path
def _get_filesystem_and_path(passed_filesystem, path):
if passed_filesystem is None:
return resolve_filesystem_and_path(path, passed_filesystem)
else:
passed_filesystem = _ensure_filesystem(passed_filesystem)
parsed_path = _parse_uri(path)
return passed_filesystem, parsed_path
def _check_contains_null(val):
if isinstance(val, six.binary_type):
for byte in val:
if isinstance(byte, six.binary_type):
compare_to = chr(0)
else:
compare_to = 0
if byte == compare_to:
return True
elif isinstance(val, six.text_type):
return u'\x00' in val
return False
def _check_filters(filters):
"""
Check if filters are well-formed.
"""
if filters is not None:
if len(filters) == 0 or any(len(f) == 0 for f in filters):
raise ValueError("Malformed filters")
if isinstance(filters[0][0], six.string_types):
# We have encountered the situation where we have one nesting level
# too few:
# We have [(,,), ..] instead of [[(,,), ..]]
filters = [filters]
for conjunction in filters:
for col, op, val in conjunction:
if (
isinstance(val, list)
and all(_check_contains_null(v) for v in val)
or _check_contains_null(val)
):
raise NotImplementedError(
"Null-terminated binary strings are not supported as"
" filter values."
)
return filters
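# Illustrative examples of the accepted filter syntax (comments only; the column
# names are hypothetical):
#
#   [[('year', '=', 2019), ('month', 'in', {1, 2, 3})]]   # year == 2019 AND month in {1, 2, 3}
#   [[('x', '>', 7)], [('y', '!=', 'a')]]                 # (x > 7) OR (y != 'a')
#   [('x', '=', 0)]                                       # flat shorthand, wrapped by _check_filters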
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile(object):
"""
Reader interface for a single Parquet file
Parameters
----------
source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
Readable source. For passing bytes or buffer-like file containing a
Parquet file, use pyarrow.BufferReader
metadata : FileMetaData, default None
Use existing metadata object, rather than reading from file.
common_metadata : FileMetaData, default None
Will be used in reads for pandas schema metadata if not found in the
main file's metadata, no other uses at the moment
memory_map : boolean, default True
If the source is a file path, use a memory map to read file, which can
improve performance in some environments
"""
def __init__(self, source, metadata=None, common_metadata=None,
read_dictionary=None, memory_map=True):
self.reader = ParquetReader()
self.reader.open(source, use_memory_map=memory_map,
read_dictionary=read_dictionary, metadata=metadata)
self.common_metadata = common_metadata
self._nested_paths_by_prefix = self._build_nested_paths()
def _build_nested_paths(self):
paths = self.reader.column_paths
result = defaultdict(list)
def _visit_piece(i, key, rest):
result[key].append(i)
if len(rest) > 0:
nested_key = '.'.join((key, rest[0]))
_visit_piece(i, nested_key, rest[1:])
for i, path in enumerate(paths):
_visit_piece(i, path[0], path[1:])
return result
@property
def metadata(self):
return self.reader.metadata
@property
def schema(self):
return self.metadata.schema
@property
def num_row_groups(self):
return self.reader.num_row_groups
def read_row_group(self, i, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
Read a single row group from a Parquet file
Parameters
----------
columns: list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
use_threads : boolean, default True
Perform multi-threaded column reads
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
pyarrow.table.Table
Content of the row group as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_group(i, column_indices=column_indices,
use_threads=use_threads)
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read a Table from Parquet format
Parameters
----------
columns: list
If not None, only these columns will be read from the file. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
use_threads : boolean, default True
Perform multi-threaded column reads
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
pyarrow.table.Table
Content of the file as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_all(column_indices=column_indices,
use_threads=use_threads)
def scan_contents(self, columns=None, batch_size=65536):
"""
Read contents of file with a single thread for indicated columns and
batch size. Number of rows in file is returned. This function is used
for benchmarking
Parameters
----------
columns : list of integers, default None
If None, scan all columns
batch_size : int, default 64K
Number of rows to read at a time internally
Returns
-------
num_rows : number of rows in file
"""
column_indices = self._get_column_indices(columns)
return self.reader.scan_contents(column_indices,
batch_size=batch_size)
def _get_column_indices(self, column_names, use_pandas_metadata=False):
if column_names is None:
return None
indices = []
for name in column_names:
if name in self._nested_paths_by_prefix:
indices.extend(self._nested_paths_by_prefix[name])
if use_pandas_metadata:
file_keyvalues = self.metadata.metadata
common_keyvalues = (self.common_metadata.metadata
if self.common_metadata is not None
else None)
if file_keyvalues and b'pandas' in file_keyvalues:
index_columns = _get_pandas_index_columns(file_keyvalues)
elif common_keyvalues and b'pandas' in common_keyvalues:
index_columns = _get_pandas_index_columns(common_keyvalues)
else:
index_columns = []
if indices is not None and index_columns:
indices += [self.reader.column_name_idx(descr)
for descr in index_columns
if not isinstance(descr, dict)]
return indices
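# Illustrative usage sketch (comments only; 'example.parquet' is a placeholder path):
#
#   pf = ParquetFile('example.parquet')
#   pf.num_row_groups                                # number of row groups in the file
#   t0 = pf.read_row_group(0, columns=['a'])         # read one row group as a Table
#   t = pf.read(use_pandas_metadata=True)            # read the whole file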
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
def _sanitized_spark_field_name(name):
return _SPARK_DISALLOWED_CHARS.sub('_', name)
def _sanitize_schema(schema, flavor):
if 'spark' in flavor:
sanitized_fields = []
schema_changed = False
for field in schema:
name = field.name
sanitized_name = _sanitized_spark_field_name(name)
if sanitized_name != name:
schema_changed = True
sanitized_field = pa.field(sanitized_name, field.type,
field.nullable, field.metadata)
sanitized_fields.append(sanitized_field)
else:
sanitized_fields.append(field)
new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
return new_schema, schema_changed
else:
return schema, False
def _sanitize_table(table, new_schema, flavor):
# TODO: This will not handle prohibited characters in nested field names
if 'spark' in flavor:
column_data = [table[i].data for i in range(table.num_columns)]
return pa.Table.from_arrays(column_data, schema=new_schema)
else:
return table
_parquet_writer_arg_docs = """version : {"1.0", "2.0"}, default "1.0"
The Parquet format version, defaults to 1.0
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
use_deprecated_int96_timestamps : boolean, default None
Write timestamps to INT96 Parquet format. Defaults to False unless enabled
by flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : string, default None
Cast timestamps to a particular resolution.
Valid values: {None, 'ms', 'us'}
data_page_size : int, default None
Set a target threshold for the approximate encoded size of data
pages within a column chunk. If None, use the default data page
size of 1MByte.
allow_truncated_timestamps : boolean, default False
Allow loss of data when coercing timestamps to a particular
resolution. E.g. if microsecond or nanosecond data is lost when coercing to
'ms', do not raise an exception
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
Valid values: {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD'}
write_statistics : bool or list
Specify if we should write statistics in general (default is True) or only
for some columns.
flavor : {'spark'}, default None
Sanitize schema or set other compatibility options to work with
various target systems
filesystem : FileSystem, default None
If nothing passed, will be inferred from `where` if path-like, else
`where` is already a file-like object so no filesystem is needed."""
class ParquetWriter(object):
__doc__ = """
Class for incrementally building a Parquet file for Arrow tables
Parameters
----------
where : path or file-like object
schema : arrow Schema
{0}
**options : dict
If options contains a key `metadata_collector` then the
corresponding value is assumed to be a list (or any object with
`.append` method) that will be filled with file metadata instances
of dataset pieces.
""".format(_parquet_writer_arg_docs)
def __init__(self, where, schema, filesystem=None,
flavor=None,
version='1.0',
use_dictionary=True,
compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None, **options):
if use_deprecated_int96_timestamps is None:
# Use int96 timestamps for Spark
if flavor is not None and 'spark' in flavor:
use_deprecated_int96_timestamps = True
else:
use_deprecated_int96_timestamps = False
self.flavor = flavor
if flavor is not None:
schema, self.schema_changed = _sanitize_schema(schema, flavor)
else:
self.schema_changed = False
self.schema = schema
self.where = where
# If we open a file using a filesystem, store file handle so we can be
# sure to close it when `self.close` is called.
self.file_handle = None
filesystem, path = resolve_filesystem_and_path(where, filesystem)
if filesystem is not None:
sink = self.file_handle = filesystem.open(path, 'wb')
else:
sink = where
self._metadata_collector = options.pop('metadata_collector', None)
self.writer = _parquet.ParquetWriter(
sink, schema,
version=version,
compression=compression,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
**options)
self.is_open = True
def __del__(self):
if getattr(self, 'is_open', False):
self.close()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
# return false since we want to propagate exceptions
return False
def write_table(self, table, row_group_size=None):
if self.schema_changed:
table = _sanitize_table(table, self.schema, self.flavor)
assert self.is_open
if not table.schema.equals(self.schema, check_metadata=False):
msg = ('Table schema does not match schema used to create file: '
'\ntable:\n{0!s} vs. \nfile:\n{1!s}'.format(table.schema,
self.schema))
raise ValueError(msg)
self.writer.write_table(table, row_group_size=row_group_size)
def close(self):
if self.is_open:
self.writer.close()
self.is_open = False
if self._metadata_collector is not None:
self._metadata_collector.append(self.writer.metadata)
if self.file_handle is not None:
self.file_handle.close()
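# Illustrative usage sketch (comments only; 'out.parquet' and `table` are
# placeholders for an output path and a pyarrow.Table):
#
#   with ParquetWriter('out.parquet', table.schema, compression='snappy') as writer:
#       writer.write_table(table)        # may be called repeatedly to append row groups
#
# The context manager guarantees close() is called, which finalizes the footer and
# appends the file metadata to any `metadata_collector` passed through **options.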
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece(object):
"""
A single chunk of a potentially larger Parquet dataset to read. The
arguments will indicate to read either a single row group or all row
groups, and whether to add partition keys to the resulting pyarrow.Table
Parameters
----------
path : str or pathlib.Path
Path to file in the file system where this piece is located
open_file_func : callable
Function to use for obtaining file handle to dataset piece
partition_keys : list of tuples
[(column name, ordinal index)]
row_group : int, default None
Row group to load. By default, reads all row groups
"""
def __init__(self, path, open_file_func=partial(open, mode='rb'),
file_options=None, row_group=None, partition_keys=None):
self.path = _stringify_path(path)
self.open_file_func = open_file_func
self.row_group = row_group
self.partition_keys = partition_keys or []
self.file_options = file_options or {}
def __eq__(self, other):
if not isinstance(other, ParquetDatasetPiece):
return False
return (self.path == other.path and
self.row_group == other.row_group and
self.partition_keys == other.partition_keys)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return ('{0}({1!r}, row_group={2!r}, partition_keys={3!r})'
.format(type(self).__name__, self.path,
self.row_group,
self.partition_keys))
def __str__(self):
result = ''
if len(self.partition_keys) > 0:
partition_str = ', '.join('{0}={1}'.format(name, index)
for name, index in self.partition_keys)
result += 'partition[{0}] '.format(partition_str)
result += self.path
if self.row_group is not None:
result += ' | row_group={0}'.format(self.row_group)
return result
def get_metadata(self):
"""
Returns the file's metadata
Returns
-------
metadata : FileMetaData
"""
f = self.open()
return f.metadata
def open(self):
"""
Returns instance of ParquetFile
"""
reader = self.open_file_func(self.path)
if not isinstance(reader, ParquetFile):
reader = ParquetFile(reader, **self.file_options)
return reader
def read(self, columns=None, use_threads=True, partitions=None,
file=None, use_pandas_metadata=False):
"""
Read this piece as a pyarrow.Table
Parameters
----------
columns : list of column names, default None
use_threads : boolean, default True
Perform multi-threaded column reads
partitions : ParquetPartitions, default None
file : file-like object
passed to ParquetFile
Returns
-------
table : pyarrow.Table
"""
if self.open_file_func is not None:
reader = self.open()
elif file is not None:
reader = ParquetFile(file, **self.file_options)
else:
# try to read the local path
reader = ParquetFile(self.path, **self.file_options)
options = dict(columns=columns,
use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if self.row_group is not None:
table = reader.read_row_group(self.row_group, **options)
else:
table = reader.read(**options)
if len(self.partition_keys) > 0:
if partitions is None:
raise ValueError('Must pass partition sets')
# Here, the index is the categorical code of the partition where
# this piece is located. Suppose we had
#
# /foo=a/0.parq
# /foo=b/0.parq
# /foo=c/0.parq
#
# Then we assign a=0, b=1, c=2. And the resulting Table pieces will
# have a DictionaryArray column named foo having the constant index
# value as indicated. The distinct categories of the partition have
# been computed in the ParquetManifest
for i, (name, index) in enumerate(self.partition_keys):
# The partition code is the same for all values in this piece
indices = np.array([index], dtype='i4').repeat(len(table))
# This is set of all partition values, computed as part of the
# manifest, so ['a', 'b', 'c'] as in our example above.
dictionary = partitions.levels[i].dictionary
arr = pa.DictionaryArray.from_arrays(indices, dictionary)
table = table.append_column(name, arr)
return table
class PartitionSet(object):
"""A data structure for cataloguing the observed Parquet partitions at a
particular level. So if we have
/foo=a/bar=0
/foo=a/bar=1
/foo=a/bar=2
/foo=b/bar=0
/foo=b/bar=1
/foo=b/bar=2
Then we have two partition sets, one for foo, another for bar. As we visit
levels of the partition hierarchy, a PartitionSet tracks the distinct
values and assigns categorical codes to use when reading the pieces
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or []
self.key_indices = {k: i for i, k in enumerate(self.keys)}
self._dictionary = None
def get_index(self, key):
"""
Get the index of the partition value if it is known, otherwise assign
one
"""
if key in self.key_indices:
return self.key_indices[key]
else:
index = len(self.key_indices)
self.keys.append(key)
self.key_indices[key] = index
return index
@property
def dictionary(self):
if self._dictionary is not None:
return self._dictionary
if len(self.keys) == 0:
raise ValueError('No known partition keys')
# Only integer and string partition types are supported right now
try:
integer_keys = [int(x) for x in self.keys]
dictionary = lib.array(integer_keys)
except ValueError:
dictionary = lib.array(self.keys)
self._dictionary = dictionary
return dictionary
@property
def is_sorted(self):
return list(self.keys) == sorted(self.keys)
class ParquetPartitions(object):
def __init__(self):
self.levels = []
self.partition_names = set()
def __len__(self):
return len(self.levels)
def __getitem__(self, i):
return self.levels[i]
def equals(self, other):
if not isinstance(other, ParquetPartitions):
raise TypeError('`other` must be an instance of ParquetPartitions')
return (self.levels == other.levels and
self.partition_names == other.partition_names)
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def __ne__(self, other):
# required for python 2, cython implements it by default
return not (self == other)
def get_index(self, level, name, key):
"""
Record a partition value at a particular level, returning the distinct
code for that value at that level. Example:
partitions.get_index(1, 'foo', 'a') returns 0
partitions.get_index(1, 'foo', 'b') returns 1
partitions.get_index(1, 'foo', 'c') returns 2
partitions.get_index(1, 'foo', 'a') returns 0
Parameters
----------
level : int
The nesting level of the partition we are observing
name : string
The partition name
key : string or int
The partition value
"""
if level == len(self.levels):
if name in self.partition_names:
raise ValueError('{0} was the name of the partition in '
'another level'.format(name))
part_set = PartitionSet(name)
self.levels.append(part_set)
self.partition_names.add(name)
return self.levels[level].get_index(key)
def filter_accepts_partition(self, part_key, filter, level):
p_column, p_value_index = part_key
f_column, op, f_value = filter
if p_column != f_column:
return True
f_type = type(f_value)
if isinstance(f_value, set):
if not f_value:
raise ValueError("Cannot use empty set as filter value")
if op not in {'in', 'not in'}:
raise ValueError("Op '%s' not supported with set value",
op)
if len(set([type(item) for item in f_value])) != 1:
raise ValueError("All elements of set '%s' must be of"
" same type", f_value)
f_type = type(next(iter(f_value)))
p_value = f_type((self.levels[level]
.dictionary[p_value_index]
.as_py()))
if op == "=" or op == "==":
return p_value == f_value
elif op == "!=":
return p_value != f_value
elif op == '<':
return p_value < f_value
elif op == '>':
return p_value > f_value
elif op == '<=':
return p_value <= f_value
elif op == '>=':
return p_value >= f_value
elif op == 'in':
return p_value in f_value
elif op == 'not in':
return p_value not in f_value
else:
raise ValueError("'%s' is not a valid operator in predicates.",
filter[1])
class ParquetManifest(object):
"""
"""
def __init__(self, dirpath, open_file_func=None, filesystem=None,
pathsep='/', partition_scheme='hive', metadata_nthreads=1):
filesystem, dirpath = _get_filesystem_and_path(filesystem, dirpath)
self.filesystem = filesystem
self.open_file_func = open_file_func
self.pathsep = pathsep
self.dirpath = _stringify_path(dirpath)
self.partition_scheme = partition_scheme
self.partitions = ParquetPartitions()
self.pieces = []
self._metadata_nthreads = metadata_nthreads
self._thread_pool = futures.ThreadPoolExecutor(
max_workers=metadata_nthreads)
self.common_metadata_path = None
self.metadata_path = None
self._visit_level(0, self.dirpath, [])
# Due to concurrency, pieces will potentially be out of order if the
# dataset is partitioned so we sort them to yield stable results
self.pieces.sort(key=lambda piece: piece.path)
if self.common_metadata_path is None:
# _common_metadata is a subset of _metadata
self.common_metadata_path = self.metadata_path
self._thread_pool.shutdown()
def _visit_level(self, level, base_path, part_keys):
fs = self.filesystem
_, directories, files = next(fs.walk(base_path))
filtered_files = []
for path in files:
full_path = self.pathsep.join((base_path, path))
if path.endswith('_common_metadata'):
self.common_metadata_path = full_path
elif path.endswith('_metadata'):
self.metadata_path = full_path
elif self._should_silently_exclude(path):
continue
else:
filtered_files.append(full_path)
# ARROW-1079: Filter out "private" directories starting with underscore
filtered_directories = [self.pathsep.join((base_path, x))
for x in directories
if not _is_private_directory(x)]
filtered_files.sort()
filtered_directories.sort()
if len(filtered_files) > 0 and len(filtered_directories) > 0:
raise ValueError('Found files in an intermediate '
'directory: {0}'.format(base_path))
elif len(filtered_directories) > 0:
self._visit_directories(level, filtered_directories, part_keys)
else:
self._push_pieces(filtered_files, part_keys)
def _should_silently_exclude(self, file_name):
return (file_name.endswith('.crc') or # Checksums
file_name.endswith('_$folder$') or # HDFS directories in S3
file_name.startswith('.') or # Hidden files starting with .
file_name.startswith('_') or # Hidden files starting with _
file_name in EXCLUDED_PARQUET_PATHS)
def _visit_directories(self, level, directories, part_keys):
futures_list = []
for path in directories:
head, tail = _path_split(path, self.pathsep)
name, key = _parse_hive_partition(tail)
index = self.partitions.get_index(level, name, key)
dir_part_keys = part_keys + [(name, index)]
# If you have less threads than levels, the wait call will block
# indefinitely due to multiple waits within a thread.
if level < self._metadata_nthreads:
future = self._thread_pool.submit(self._visit_level,
level + 1,
path,
dir_part_keys)
futures_list.append(future)
else:
self._visit_level(level + 1, path, dir_part_keys)
if futures_list:
futures.wait(futures_list)
def _parse_partition(self, dirname):
if self.partition_scheme == 'hive':
return _parse_hive_partition(dirname)
else:
raise NotImplementedError('partition schema: {0}'
.format(self.partition_scheme))
def _push_pieces(self, files, part_keys):
self.pieces.extend([
ParquetDatasetPiece(path, partition_keys=part_keys,
open_file_func=self.open_file_func)
for path in files
])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {0}'.format(value))
return value.split('=', 1)
def _is_private_directory(x):
_, tail = os.path.split(x)
return tail.startswith('_') and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
def _open_dataset_file(dataset, path, meta=None):
if dataset.fs is None or isinstance(dataset.fs, LocalFileSystem):
return ParquetFile(path, metadata=meta, memory_map=dataset.memory_map,
read_dictionary=dataset.read_dictionary,
common_metadata=dataset.common_metadata)
else:
return ParquetFile(dataset.fs.open(path, mode='rb'), metadata=meta,
memory_map=dataset.memory_map,
read_dictionary=dataset.read_dictionary,
common_metadata=dataset.common_metadata)
_read_docstring_common = """\
read_dictionary : list, default None
List of names or column paths (for nested types) to read directly
as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
a flat column as dictionary-encoded pass the column name. For
nested types, you must pass the full column "path", which could be
something like level1.level2.list.item. Refer to the Parquet
file's schema to obtain the paths.
memory_map : boolean, default True
If the source is a file path, use a memory map to read file, which can
improve performance in some environments"""
class ParquetDataset(object):
__doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories
Parameters
----------
path_or_paths : str or List[str]
A directory name, single file name, or list of file names
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter
split_row_groups : boolean, default False
Divide files into pieces for each row group in the file
validate_schema : boolean, default True
Check that individual file schemas are all the same / compatible
filters : List[Tuple] or List[List[Tuple]] or None (default)
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some files of the dataset.
Predicates are expressed in disjunctive normal form (DNF). This means
that the innermost tuple describes a single column predicate. These
inner predicates are combined with a conjunction (AND) into a
larger predicate. The outermost list then combines all filters
with a disjunction (OR). In this way, arbitrary boolean-logic
combinations of column predicates can be expressed.
Filters can also be passed as a flat List[Tuple]; such predicates
are evaluated as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
metadata_nthreads: int, default 1
How many threads to allow the thread pool which is used to read the
dataset metadata. Increasing this is helpful to read partitioned
datasets.
{0}
""".format(_read_docstring_common)
def __init__(self, path_or_paths, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1,
read_dictionary=None, memory_map=True):
a_path = path_or_paths
if isinstance(a_path, list):
a_path = a_path[0]
self.fs, _ = _get_filesystem_and_path(filesystem, a_path)
if isinstance(path_or_paths, list):
self.paths = [_parse_uri(path) for path in path_or_paths]
else:
self.paths = _parse_uri(path_or_paths)
self.read_dictionary = read_dictionary
self.memory_map = memory_map
(self.pieces,
self.partitions,
self.common_metadata_path,
self.metadata_path) = _make_manifest(
path_or_paths, self.fs, metadata_nthreads=metadata_nthreads,
open_file_func=partial(_open_dataset_file, self))
if self.common_metadata_path is not None:
with self.fs.open(self.common_metadata_path) as f:
self.common_metadata = read_metadata(f, memory_map=memory_map)
else:
self.common_metadata = None
if metadata is None and self.metadata_path is not None:
with self.fs.open(self.metadata_path) as f:
self.metadata = read_metadata(f, memory_map=memory_map)
else:
self.metadata = metadata
self.schema = schema
self.split_row_groups = split_row_groups
if split_row_groups:
raise NotImplementedError("split_row_groups not yet implemented")
if filters is not None:
filters = _check_filters(filters)
self._filter(filters)
if validate_schema:
self.validate_schemas()
def equals(self, other):
if not isinstance(other, ParquetDataset):
raise TypeError('`other` must be an instance of ParquetDataset')
if self.fs.__class__ != other.fs.__class__:
return False
for prop in ('paths', 'memory_map', 'pieces', 'partitions',
'common_metadata_path', 'metadata_path',
'common_metadata', 'metadata', 'schema',
'split_row_groups'):
if getattr(self, prop) != getattr(other, prop):
return False
return True
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def __ne__(self, other):
# required for python 2, cython implements it by default
return not (self == other)
def validate_schemas(self):
if self.metadata is None and self.schema is None:
if self.common_metadata is not None:
self.schema = self.common_metadata.schema
else:
self.schema = self.pieces[0].get_metadata().schema
elif self.schema is None:
self.schema = self.metadata.schema
# Verify schemas are all compatible
dataset_schema = self.schema.to_arrow_schema()
# Exclude the partition columns from the schema, they are provided
# by the path, not the DatasetPiece
if self.partitions is not None:
for partition_name in self.partitions.partition_names:
if dataset_schema.get_field_index(partition_name) != -1:
field_idx = dataset_schema.get_field_index(partition_name)
dataset_schema = dataset_schema.remove(field_idx)
for piece in self.pieces:
file_metadata = piece.get_metadata()
file_schema = file_metadata.schema.to_arrow_schema()
if not dataset_schema.equals(file_schema, check_metadata=False):
raise ValueError('Schema in {0!s} was different. \n'
'{1!s}\n\nvs\n\n{2!s}'
.format(piece, file_schema,
dataset_schema))
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read multiple Parquet files as a single pyarrow.Table
Parameters
----------
columns : List[str]
Names of columns to read from the file
use_threads : boolean, default True
Perform multi-threaded column reads
use_pandas_metadata : bool, default False
Passed through to each dataset piece
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
tables = []
for piece in self.pieces:
table = piece.read(columns=columns, use_threads=use_threads,
partitions=self.partitions,
use_pandas_metadata=use_pandas_metadata)
tables.append(table)
all_data = lib.concat_tables(tables)
if use_pandas_metadata:
# We need to ensure that this metadata is set in the Table's schema
# so that Table.to_pandas will construct pandas.DataFrame with the
# right index
common_metadata = self._get_common_pandas_metadata()
current_metadata = all_data.schema.metadata or {}
if common_metadata and b'pandas' not in current_metadata:
all_data = all_data.replace_schema_metadata({
b'pandas': common_metadata})
return all_data
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
return self.read(use_pandas_metadata=True, **kwargs)
def _get_common_pandas_metadata(self):
if self.common_metadata is None:
return None
keyvalues = self.common_metadata.metadata
return keyvalues.get(b'pandas', None)
def _filter(self, filters):
accepts_filter = self.partitions.filter_accepts_partition
def one_filter_accepts(piece, filter):
return all(accepts_filter(part_key, filter, level)
for level, part_key in enumerate(piece.partition_keys))
def all_filters_accept(piece):
return any(all(one_filter_accepts(piece, f) for f in conjunction)
for conjunction in filters)
self.pieces = [p for p in self.pieces if all_filters_accept(p)]
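# Illustrative usage sketch (comments only; 'dataset_root/' is a placeholder for a
# hive-partitioned dataset directory):
#
#   dataset = ParquetDataset('dataset_root/',
#                            filters=[[('year', '=', 2019)]],   # partition pruning
#                            metadata_nthreads=4)
#   table = dataset.read(columns=['value'], use_pandas_metadata=True)
#
# Pieces whose partition keys fail the DNF filters are dropped, so files under,
# e.g., dataset_root/year=2018/ are never opened.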
def _make_manifest(path_or_paths, fs, pathsep='/', metadata_nthreads=1,
open_file_func=None):
partitions = None
common_metadata_path = None
metadata_path = None
if isinstance(path_or_paths, list) and len(path_or_paths) == 1:
# Dask passes a directory as a list of length 1
path_or_paths = path_or_paths[0]
if _is_path_like(path_or_paths) and fs.isdir(path_or_paths):
manifest = ParquetManifest(path_or_paths, filesystem=fs,
open_file_func=open_file_func,
pathsep=fs.pathsep,
metadata_nthreads=metadata_nthreads)
common_metadata_path = manifest.common_metadata_path
metadata_path = manifest.metadata_path
pieces = manifest.pieces
partitions = manifest.partitions
else:
if not isinstance(path_or_paths, list):
path_or_paths = [path_or_paths]
# List of paths
if len(path_or_paths) == 0:
raise ValueError('Must pass at least one file path')
pieces = []
for path in path_or_paths:
if not fs.isfile(path):
raise IOError('Passed non-file path: {0}'
.format(path))
piece = ParquetDatasetPiece(path, open_file_func=open_file_func)
pieces.append(piece)
return pieces, partitions, common_metadata_path, metadata_path
_read_table_docstring = """
{0}
Parameters
----------
source: str, pyarrow.NativeFile, or file-like object
If a string passed, can be a single file name or directory name. For
file-like objects, only read a single file. Use pyarrow.BufferReader to
read a file contained in a bytes or buffer-like object
columns: list
If not None, only these columns will be read from the file. A column
name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
'a.c', and 'a.d.e'
use_threads : boolean, default True
Perform multi-threaded column reads
metadata : FileMetaData
If separately computed
{1}
filters : List[Tuple] or List[List[Tuple]] or None (default)
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some files of the dataset if `source` is a directory.
See the docstring of ParquetDataset for more details.
Returns
-------
{2}
"""
def read_table(source, columns=None, use_threads=True, metadata=None,
use_pandas_metadata=False, memory_map=True,
read_dictionary=None, filesystem=None, filters=None):
if _is_path_like(source):
pf = ParquetDataset(source, metadata=metadata, memory_map=memory_map,
read_dictionary=read_dictionary,
filesystem=filesystem, filters=filters)
else:
pf = ParquetFile(source, metadata=metadata,
read_dictionary=read_dictionary,
memory_map=memory_map)
return pf.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
read_table.__doc__ = _read_table_docstring.format(
'Read a Table from Parquet format',
"\n".join((_read_docstring_common,
"""use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded""")),
"""pyarrow.Table
Content of the file as a table (of columns)""")
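# Illustrative usage sketch (comments only; 'data.parquet' is a placeholder path):
#
#   table = read_table('data.parquet', columns=['a'], use_threads=True)
#   df = table.to_pandas()
#
# Passing a directory path instead of a single file dispatches to ParquetDataset,
# so the same call works for partitioned datasets (optionally with `filters=`).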
def read_pandas(source, columns=None, use_threads=True, memory_map=True,
metadata=None, filters=None):
return read_table(source, columns=columns,
use_threads=use_threads,
metadata=metadata, memory_map=True,
filters=filters,
use_pandas_metadata=True)
read_pandas.__doc__ = _read_table_docstring.format(
'Read a Table from Parquet format, also reading DataFrame\n'
'index values if known in the file metadata',
_read_docstring_common,
"""pyarrow.Table
Content of the file as a Table of Columns, including DataFrame
indexes as columns""")
def write_table(table, where, row_group_size=None, version='1.0',
use_dictionary=True, compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
coerce_timestamps=None,
allow_truncated_timestamps=False,
data_page_size=None, flavor=None,
filesystem=None, **kwargs):
row_group_size = kwargs.pop('chunk_size', row_group_size)
use_int96 = use_deprecated_int96_timestamps
try:
with ParquetWriter(
where, table.schema,
filesystem=filesystem,
version=version,
flavor=flavor,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
coerce_timestamps=coerce_timestamps,
data_page_size=data_page_size,
allow_truncated_timestamps=allow_truncated_timestamps,
compression=compression,
use_deprecated_int96_timestamps=use_int96,
**kwargs) as writer:
writer.write_table(table, row_group_size=row_group_size)
except Exception:
if _is_path_like(where):
try:
os.remove(_stringify_path(where))
except os.error:
pass
raise
write_table.__doc__ = """
Write a Table to Parquet format
Parameters
----------
table : pyarrow.Table
where: string or pyarrow.NativeFile
{0}
""".format(_parquet_writer_arg_docs)
def _mkdir_if_not_exists(fs, path):
if fs._isfilestore() and not fs.exists(path):
try:
fs.mkdir(path)
except OSError:
assert fs.exists(path)
def write_to_dataset(table, root_path, partition_cols=None, filesystem=None,
preserve_index=None, **kwargs):
"""Wrapper around parquet.write_table for writing a Table to
Parquet format by partitions.
For each combination of partition columns and values,
subdirectories are created in the following
manner:
root_dir/
group1=value1
group2=value1
<uuid>.parquet
group2=value2
<uuid>.parquet
group1=valueN
group2=value1
<uuid>.parquet
group2=valueN
<uuid>.parquet
Parameters
----------
table : pyarrow.Table
root_path : string,
The root directory of the dataset
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
partition_cols : list,
Column names by which to partition the dataset
Columns are partitioned in the order they are given
**kwargs : dict,
kwargs for write_table function. Using `metadata_collector` in
kwargs allows one to collect the file metadata instances of
dataset pieces. See docstring for `write_table` or
`ParquetWriter` for more information.
"""
if preserve_index is not None:
warnings.warn('preserve_index argument is deprecated as of 0.13.0 and '
'has no effect', DeprecationWarning)
fs, root_path = _get_filesystem_and_path(filesystem, root_path)
_mkdir_if_not_exists(fs, root_path)
if partition_cols is not None and len(partition_cols) > 0:
df = table.to_pandas(ignore_metadata=True)
partition_keys = [df[col] for col in partition_cols]
data_df = df.drop(partition_cols, axis='columns')
data_cols = df.columns.drop(partition_cols)
if len(data_cols) == 0:
raise ValueError('No data left to save outside partition columns')
subschema = table.schema
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
for col in table.schema.names:
if col in partition_cols:
subschema = subschema.remove(subschema.get_field_index(col))
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = '/'.join(
['{colname}={value}'.format(colname=name, value=val)
for name, val in zip(partition_cols, keys)])
subtable = pa.Table.from_pandas(subgroup, preserve_index=False,
schema=subschema, safe=False)
prefix = '/'.join([root_path, subdir])
_mkdir_if_not_exists(fs, prefix)
outfile = guid() + '.parquet'
full_path = '/'.join([prefix, outfile])
with fs.open(full_path, 'wb') as f:
write_table(subtable, f, **kwargs)
else:
outfile = guid() + '.parquet'
full_path = '/'.join([root_path, outfile])
with fs.open(full_path, 'wb') as f:
write_table(table, f, **kwargs)
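# Illustrative usage sketch (comments only; names are placeholders):
#
#   write_to_dataset(table, root_path='dataset_root',
#                    partition_cols=['year', 'month'])
#
# This writes dataset_root/year=<v1>/month=<v2>/<uuid>.parquet for every distinct
# combination of partition values, matching the directory layout documented above.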
def write_metadata(schema, where, version='1.0',
use_deprecated_int96_timestamps=False,
coerce_timestamps=None):
"""
Write metadata-only Parquet file from schema
Parameters
----------
schema : pyarrow.Schema
where: string or pyarrow.NativeFile
version : {"1.0", "2.0"}, default "1.0"
The Parquet format version, defaults to 1.0
use_deprecated_int96_timestamps : boolean, default False
Write nanosecond resolution timestamps to INT96 Parquet format
coerce_timestamps : string, default None
Cast timestamps to a particular resolution.
Valid values: {None, 'ms', 'us'}
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
"""
writer = ParquetWriter(
where, schema, version=version,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
coerce_timestamps=coerce_timestamps)
writer.close()
def read_metadata(where, memory_map=False):
"""
Read FileMetadata from footer of a single Parquet file
Parameters
----------
where : string (filepath) or file-like object
memory_map : boolean, default False
Create memory map when the source is a file path
Returns
-------
metadata : FileMetadata
"""
return ParquetFile(where, memory_map=memory_map).metadata
def read_schema(where, memory_map=False):
"""
Read effective Arrow schema from Parquet file metadata
Parameters
----------
where : string (filepath) or file-like object
memory_map : boolean, default False
Create memory map when the source is a file path
Returns
-------
schema : pyarrow.Schema
"""
return ParquetFile(where, memory_map=memory_map).schema.to_arrow_schema()
| apache-2.0 |
sebp/scikit-survival | sksurv/svm/naive_survival_svm.py | 1 | 7047 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import numpy
from scipy.special import comb
from sklearn.svm import LinearSVC
from sklearn.utils import check_random_state
from ..base import SurvivalAnalysisMixin
from ..exceptions import NoComparablePairException
from ..util import check_arrays_survival
class NaiveSurvivalSVM(SurvivalAnalysisMixin, LinearSVC):
"""Naive version of linear Survival Support Vector Machine.
Uses regular linear support vector classifier (liblinear).
A new set of samples is created by building the difference between any two feature
vectors in the original data, thus this version requires `O(n_samples^2)` space.
See :class:`sksurv.svm.HingeLossSurvivalSVM` for the kernel naive survival SVM.
.. math::
\\min_{\\mathbf{w}}\\quad
\\frac{1}{2} \\lVert \\mathbf{w} \\rVert_2^2
+ \\gamma \\sum_{(i, j) \\in \\mathcal{P}} \\xi_{ij} \\\\
\\text{subject to}\\quad
\\mathbf{w}^\\top \\mathbf{x}_i - \\mathbf{w}^\\top \\mathbf{x}_j \\geq 1 - \\xi_{ij},\\quad
\\forall (i, j) \\in \\mathcal{P}, \\\\
\\xi_{ij} \\geq 0,\\quad \\forall (i, j) \\in \\mathcal{P}.
\\mathcal{P} = \\{ (i, j) \\mid y_i > y_j \\land \\delta_j = 1 \\}_{i,j=1,\\dots,n}.
See [1]_, [2]_ for further description.
Parameters
----------
alpha : float, positive, default: 1.0
Weight of penalizing the squared hinge loss in the objective function.
loss : string, 'hinge' or 'squared_hinge', default: 'squared_hinge'
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : 'l1' | 'l2', default: 'l2'
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, default: True
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional, default: 1e-4
Tolerance for stopping criteria.
verbose : int, default: 0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None, default: None
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, default: 1000
The maximum number of iterations to be run.
See also
--------
sksurv.svm.FastSurvivalSVM
Alternative implementation with reduced time complexity for training.
References
----------
.. [1] Van Belle, V., Pelckmans, K., Suykens, J. A., & Van Huffel, S.
Support Vector Machines for Survival Analysis. In Proc. of the 3rd Int. Conf.
on Computational Intelligence in Medicine and Healthcare (CIMED). 1-8. 2007
.. [2] Evers, L., Messow, C.M.,
"Sparse kernel methods for high-dimensional survival data",
Bioinformatics 24(14), 1632-8, 2008.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=False, tol=1e-4,
alpha=1.0, verbose=0, random_state=None, max_iter=1000):
super().__init__(penalty=penalty,
loss=loss,
dual=dual,
tol=tol,
verbose=verbose,
random_state=random_state,
max_iter=max_iter,
fit_intercept=False)
self.alpha = alpha
def _get_survival_pairs(self, X, y, random_state): # pylint: disable=no-self-use
X, event, time = check_arrays_survival(X, y)
idx = numpy.arange(X.shape[0], dtype=int)
random_state.shuffle(idx)
n_pairs = int(comb(X.shape[0], 2))
x_pairs = numpy.empty((n_pairs, X.shape[1]), dtype=float)
y_pairs = numpy.empty(n_pairs, dtype=numpy.int8)
k = 0
for xi, xj in itertools.combinations(idx, 2):
if time[xi] > time[xj] and event[xj]:
numpy.subtract(X[xi, :], X[xj, :], out=x_pairs[k, :])
y_pairs[k] = 1
k += 1
elif time[xi] < time[xj] and event[xi]:
numpy.subtract(X[xi, :], X[xj, :], out=x_pairs[k, :])
y_pairs[k] = -1
k += 1
elif time[xi] == time[xj] and (event[xi] or event[xj]):
numpy.subtract(X[xi, :], X[xj, :], out=x_pairs[k, :])
y_pairs[k] = 1 if event[xj] else -1
k += 1
x_pairs.resize((k, X.shape[1]), refcheck=False)
y_pairs.resize(k, refcheck=False)
return x_pairs, y_pairs
def fit(self, X, y, sample_weight=None):
"""Build a survival support vector machine model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
sample_weight : array-like, shape = (n_samples,), optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self
"""
random_state = check_random_state(self.random_state)
x_pairs, y_pairs = self._get_survival_pairs(X, y, random_state)
if x_pairs.shape[0] == 0:
raise NoComparablePairException("Data has no comparable pairs, cannot fit model.")
self.C = self.alpha
return super().fit(x_pairs, y_pairs, sample_weight=sample_weight)
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features,)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
return -self.decision_function(X)
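# Illustrative usage sketch (comments only, not part of the original module).
# `X` is an (n_samples, n_features) array and `y` a structured array whose first
# field is the boolean event indicator and second field the observed time, e.g.:
#
#   y = numpy.array([(True, 5.0), (False, 12.0), (True, 3.5)],
#                   dtype=[('event', '?'), ('time', '<f8')])
#   est = NaiveSurvivalSVM(alpha=1.0, max_iter=1000).fit(X, y)
#   ranks = est.predict(X)   # lower values indicate shorter predicted survival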
| gpl-3.0 |
holdenk/spark | python/pyspark/sql/tests/test_pandas_udf_typehints.py | 22 | 9603 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from typing import Union, Iterator, Tuple
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
from pyspark.sql import Row
if have_pandas:
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
def func(col: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def test_type_annotation_scalar_iter(self):
def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(
iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]]
) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def test_type_annotation_group_agg(self):
def func(col: pd.Series) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, col1: pd.Series) -> int:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, *args: pd.Series) -> Row:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def test_type_annotation_negative(self):
def func(col: str) -> pd.Series:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.DataFrame, col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*int",
infer_eval_type, inspect.signature(func))
def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series) -> Tuple[pd.DataFrame]:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*Tuple",
infer_eval_type, inspect.signature(func))
def func(col, *args: pd.Series) -> pd.Series:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *, col2) -> pd.DataFrame:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def test_scalar_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(v: Union[pd.Series, pd.DataFrame]) -> pd.Series:
return v + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_scalar_iter_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(itr: Iterator[pd.Series]) -> Iterator[pd.Series]:
for s in itr:
yield s + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_group_agg_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def weighted_mean(v: pd.Series, w: pd.Series) -> float:
return np.average(v, weights=w)
weighted_mean = pandas_udf("double")(weighted_mean)
actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_group_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:
return v + 1
actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
return left + 1
actual = df.groupby('id').cogroup(
self.spark.range(10).groupby("id")
).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_map_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
return map(lambda v: v + 1, iter)
actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
if __name__ == "__main__":
    from pyspark.sql.tests.test_pandas_udf_typehints import *  # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
pli1988/portfolioFactory | portfolioFactory/metrics/retMetrics.py | 1 | 2342 | """
retMetrics is a module that contains a collection of functions to compute
return metrics on Pandas timeseries.
Author: Peter Li
"""
import pandas as pd
import numpy as np
from ..utils import utils as utils
from ..utils import customExceptions as customExceptions
def main():
pass
def averageHorizonReturn(data, horizon):
''' Function to calculate average returns over a horizon.
averageHorizonReturn computes the average of rolling horizon returns
Example: average 1-Year return
>> averageHorizonReturn(data, 12)
Input:
    - data (timeseries): timeseries of monthly return data
- horizon (int): window size for rolling analysis
Returns:
- averageRollingReturn (scalar)
'''
cleanData = utils.processData(data)
if (1 <= horizon <= len(cleanData)) & isinstance(horizon, int):
return np.mean(rollingReturn(cleanData, horizon))
else:
raise customExceptions.invalidInput('averageHorizonReturn')
def cumulativeReturn(data):
''' Function to calculate cumulative returns.
Input:
    - data (timeseries): timeseries of monthly return data
Returns:
- cumulative return (scalar)
'''
cleanData = utils.processData(data)
return np.prod(1 + cleanData) - 1
def rollingReturn(data, horizon):
''' Function to calculate rolling returns over a horizon.
rollingReturn computes the returns over a horizon
    Example: rolling 1-Year returns
    >> rollingReturn(data, 12)
Input:
    - data (timeseries): timeseries of monthly return data
- horizon (int): window size for rolling analysis
Returns:
- rollingReturn (timeseries): timeseries of the same size as data
'''
cleanData = utils.processData(data)
if (1 <= horizon <= len(cleanData)) & isinstance(horizon, int):
# Calculate rolling returns
rollingReturns = pd.rolling_apply(cleanData, horizon, lambda x: np.prod(1 + x) - 1)
return rollingReturns
else:
        raise customExceptions.invalidInput('rollingReturn')
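# --- Editor's addition: hedged usage sketch, not part of the original module ---
# The metrics above expect a pandas timeseries of monthly returns. The sketch
# below builds a small synthetic series to show how the three functions would
# typically be called; the series and the assumption that utils.processData
# accepts a plain Series are illustrative only. Note that pd.rolling_apply
# (used in rollingReturn) comes from older pandas releases; newer pandas
# expresses the same computation as data.rolling(horizon).apply(...).
def _exampleUsage():
    dates = pd.date_range('2010-01-31', periods=24, freq='M')
    returns = pd.Series(np.random.RandomState(0).normal(0.01, 0.04, len(dates)), index=dates)
    print(cumulativeReturn(returns))          # total compounded return
    print(averageHorizonReturn(returns, 12))  # average rolling 1-year return
    print(rollingReturn(returns, 12).tail())  # rolling 1-year return series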
if __name__ == "__main__":
main()
| mit |
vdods/heisenberg | attic/shooting_method_2.py | 1 | 15304 | import abc
import itertools
import library.monte_carlo
import numpy as np
import scipy.integrate
import scipy.linalg
import sympy as sp
import time
import vorpy.symbolic
"""
Notes
Define "return map" R : T^* Q -> T^* Q (really R^3xR^3 -> R^3xR^3, because it's coordinate dependent):
R(q,p) is defined as the closest point (in the coordinate chart R^3xR^3 for T^* Q) to (q,p) in the
sequence of points in the solution to the orbital curve for initial condition (q,p).
Define f : T^* Q -> R, (q,p) |-> 1/2 * |(q,p) - R(q,p)|^2
Use gradient descent to find critical points of f.
The gradient of f depends on the gradient of R. This can be computed numerically using a least-squares
approximation of the first-order Taylor polynomial of R.
Select initial conditions for the gradient descent to be on the H(q,p) = 0 submanifold, probably
by picking 5 coordinates at random and solving for the 6th.
Symmetry condition: Define symmetry via map Omega : T^* Q -> T^* Q (e.g. rotation through 2*pi/3).
Define R_Omega to give point closest to Omega(q,p). Then f_Omega is defined as
f_Omega(q,p) := 1/2 * |Omega(q,p) - R_Omega(q,p)|^2,
and the gradient of f_Omega depends on the gradient of Omega and R_Omega.
TODO
- Use energy-conserving integrator
- The 7 fold solution is super close to closing, and the optimization doesn't improve much.
Perturb it (but keep it zero-energy) and see if the optimizer can close it back up.
- I think the period detection isn't fully correct for the following reason. Often times
a curve will be quasi-periodic, or have a really high order of symmetry resulting in
a very high period. Probably what we actually want to happen is that the first reasonable
candidate for period is selected, so that the symmetry order is relatively low, and the
optimizer then tries to close up that curve.
Also, we must guarantee that the period computation picks analogous points on the curve,
meaning that they come from similar time values (and not e.g. several loops later in time).
"""
def define_canonical_symplectic_form_and_inverse (*, configuration_space_dimension, dtype):
# If the tautological one-form on the cotangent bundle is
# tau := p dq
# then the symplectic form is
# omega := -dtau = -dq wedge dp
# which, in the coordinates (q_0, q_1, p_0, p_1), has the matrix
# [ 0 0 -1 0 ]
# [ 0 0 0 -1 ]
# [ 1 0 0 0 ]
# [ 0 1 0 0 ],
# or in matrix notation, with I denoting the 2x2 identity matrix,
# [ 0 -I ]
# [ I 0 ],
assert configuration_space_dimension > 0
# Abbreviations
csd = configuration_space_dimension
psd = 2*csd
canonical_symplectic_form = np.ndarray((psd,psd), dtype=dtype)
# Fill the whole thing with zeros.
canonical_symplectic_form.fill(dtype(0))
# Upper right block diagonal is -1, lower left block diagonal is 1.
for i in range(csd):
canonical_symplectic_form[i,csd+i] = dtype(-1)
canonical_symplectic_form[csd+i,i] = dtype( 1)
canonical_symplectic_form_inverse = -canonical_symplectic_form
return canonical_symplectic_form,canonical_symplectic_form_inverse
def symplectic_gradient_of (F, X, *, canonical_symplectic_form_inverse=None, dtype=None):
assert len(X)%2 == 0, 'X must be a phase space element, which in particular means it must be even dimensional.'
if canonical_symplectic_form_inverse is None:
assert dtype is not None, 'If canonical_symplectic_form_inverse is None, then dtype must not be None.'
_,canonical_symplectic_form_inverse = define_canonical_symplectic_form_and_inverse(configuration_space_dimension=X.shape[0]//2, dtype=dtype)
return np.dot(canonical_symplectic_form_inverse, vorpy.symbolic.D(F,X))
def quadratic_min (f_v):
assert len(f_v) == 3, 'require 3 values'
c = f_v[1]
b = 0.5*(f_v[2] - f_v[0])
a = 0.5*(f_v[2] + f_v[0]) - f_v[1]
x = -0.5*b/a
return a*x**2 + b*x + c
class DynamicsContext(metaclass=abc.ABCMeta):
@abc.abstractmethod
def configuration_space_dimension (self):
pass
@abc.abstractmethod
def hamiltonian (self, X):
pass
@abc.abstractmethod
def hamiltonian_vector_field (self, X, t):
pass
def phase_space_dimension (self):
return 2*self.configuration_space_dimension()
class HeisenbergDynamicsContext(DynamicsContext):
def __init__ (self):
pass
def configuration_space_dimension (self):
return 3
# This is the hamiltonian (energy) function.
@staticmethod
def hamiltonian (X, sqrt=np.sqrt, pi=np.pi):
assert len(X) == 6, "X must be a 6-vector"
x = X[0]
y = X[1]
z = X[2]
p_x = X[3]
p_y = X[4]
p_z = X[5]
alpha = 2/pi
# alpha = 1.0
beta = 16
r_squared = x**2 + y**2
mu = r_squared**2 + beta*z**2
P_x = p_x - y*p_z/2
P_y = p_y + x*p_z/2
return (P_x**2 + P_y**2)/2 - alpha/sqrt(mu)
# \omega^-1 * dH (i.e. the symplectic gradient of H) is the hamiltonian vector field for this system.
# X is the list of coordinates [x, y, z, p_x, p_y, p_z].
# t is the time at which to evaluate the flow. This particular vector field is independent of time.
#
# If the tautological one-form on the cotangent bundle is
# tau := p dq
# then the symplectic form is
# omega := -dtau = -dq wedge dp
# which, in the coordinates (q_0, q_1, p_0, p_1), has the matrix
# [ 0 0 -1 0 ]
# [ 0 0 0 -1 ]
# [ 1 0 0 0 ]
# [ 0 1 0 0 ],
# or in matrix notation, with I denoting the 2x2 identity matrix,
# [ 0 -I ]
# [ I 0 ],
# having inverse
# [ 0 I ]
# [ -I 0 ].
# With dH:
# dH = dH/dq * dq + dH/dp * dp, (here, dH/dq denotes the partial of H w.r.t. q)
# or expressed in coordinates as
# [ dH/dq ]
# [ dH/dp ]
# it follows that the sympletic gradient of H is
# dH/dp * dq - dH/dq * dp
# or expressed in coordinates as
# [ dH/dp ]
# [ -dH/dq ],
# which is Hamilton's equations.
def hamiltonian_vector_field (self, t, X): # NOTE: t comes first, because of the convention of scipy.integrate.ode
assert len(X) == 6, "must have 6 coordinates"
x = X[0]
y = X[1]
z = X[2]
p_x = X[3]
p_y = X[4]
p_z = X[5]
P_x = p_x - 0.5*y*p_z
P_y = p_y + 0.5*x*p_z
r = x**2 + y**2
# beta = 1.0/16.0
beta = 16.0
mu = r**2 + beta*z**2
alpha = 2.0/np.pi
# alpha = 1.0
alpha_times_mu_to_neg_three_halves = alpha*mu**(-1.5)
return np.array(
[
P_x, \
P_y, \
0.5*x*P_y - 0.5*y*P_x, \
-0.5*P_y*p_z - alpha_times_mu_to_neg_three_halves*r*2.0*x, \
0.5*P_x*p_z - alpha_times_mu_to_neg_three_halves*r*2.0*y, \
-beta*alpha_times_mu_to_neg_three_halves*z
],
dtype=float
)
@staticmethod
def initial_condition ():
# alpha = 2/pi, beta = 16
# Symbolically solve H(1,0,0,0,1,p_z) = 0 for p_z.
p_z = sp.var('p_z')
zero = sp.Integer(0)
one = sp.Integer(1)
# H = HeisenbergDynamicsContext.hamiltonian(np.array([one, zero, zero, zero, one, p_z], dtype=object), sqrt=sp.sqrt, pi=sp.pi)
H = HeisenbergDynamicsContext.hamiltonian(np.array([one/2, zero, zero, zero, one, p_z], dtype=object), sqrt=sp.sqrt, pi=sp.pi)
print('H = {0}'.format(H))
p_z_solution = np.max(sp.solve(H, p_z))
print('p_z = {0}'.format(p_z_solution))
p_z_solution = float(p_z_solution)
# X_0 = np.array([1.0, 0.0, 0.0, 0.0, 1.0, p_z_solution])
X_0 = np.array([0.5, 0.0, 0.0, 0.0, 1.0, p_z_solution])
return X_0
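# --- Editor's addition: hedged sanity check, not part of the original module ---
# The comment block preceding hamiltonian_vector_field derives that the field is
# omega^{-1} * dH, i.e. (dH/dp, -dH/dq). The helper below compares the hand-coded
# field against omega^{-1} times a central finite-difference gradient of H; the
# helper name and the step size h are illustrative assumptions.
def check_hamiltonian_vector_field (dynamics_context, X, h=1.0e-6):
    dim = X.shape[0]
    grad_H = np.zeros(dim)
    for i in range(dim):
        X_plus = X.copy()
        X_minus = X.copy()
        X_plus[i] += h
        X_minus[i] -= h
        grad_H[i] = (HeisenbergDynamicsContext.hamiltonian(X_plus) -
                     HeisenbergDynamicsContext.hamiltonian(X_minus)) / (2.0*h)
    _,omega_inv = define_canonical_symplectic_form_and_inverse(configuration_space_dimension=dim//2, dtype=float)
    expected = np.dot(omega_inv, grad_H)
    actual = dynamics_context.hamiltonian_vector_field(0.0, X)
    # Largest componentwise discrepancy; should be small (limited by finite-difference error).
    return np.max(np.abs(expected - actual))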
class ShootingMethodObjective:
def __init__ (self, *, dynamics_context, X_0, t_max, t_delta):
self.__dynamics_context = dynamics_context
self.X_0 = X_0
        self.__X_v = None
        self.__t_v = None
self.t_max = t_max
self.t_delta = t_delta
self.__Q_v = None
self.__Q_global_min_index = None
self.__objective = None
def configuration_space_dimension (self):
return self.__dynamics_context.configuration_space_dimension()
def flow_curve (self):
if self.__X_v is None:
# Compute the flow curve using X_0 as initial condition
# Taken from http://stackoverflow.com/questions/16973036/odd-scipy-ode-integration-error
ode = scipy.integrate.ode(self.__dynamics_context.hamiltonian_vector_field)
# ode.set_integrator('vode', nsteps=500, method='bdf') # This seems faster than dopri5
# ode.set_integrator('vode', nsteps=1000, method='bdf') # This seems faster than dopri5
ode.set_integrator('dopri5', nsteps=500)
ode.set_initial_value(self.X_0, 0.0)
start_time = time.time()
t_v = [0.0]
X_v_as_list = [self.X_0]
            while ode.successful() and ode.t < self.t_max:
                ode.integrate(ode.t + self.t_delta)
# print(ode.t)
t_v.append(ode.t)
X_v_as_list.append(ode.y)
print('integration took {0} seconds'.format(time.time() - start_time))
self.__t_v = t_v
self.__X_v = np.copy(X_v_as_list)
return self.__X_v
def t_v (self):
if self.__t_v is None:
self.flow_curve()
assert self.__t_v is not None
return self.__t_v
def squared_distance_function (self):
if self.__Q_v is None:
X_0 = self.X_0
X_v = self.flow_curve()
# Let s denote squared distance function s(t) := 1/2 |X_0 - flow_of_X_0(t))|^2
self.__Q_v = 0.5 * np.sum(np.square(X_v - X_0), axis=-1)
return self.__Q_v
def objective (self):
if self.__objective is None:
self.compute_Q_global_min_index_and_objective()
return self.__objective
def Q_global_min_index (self):
if self.__Q_global_min_index is None:
self.compute_Q_global_min_index_and_objective()
return self.__Q_global_min_index
def closest_approach_point (self):
return self.flow_curve()[self.Q_global_min_index()]
def __call__ (self):
return self.objective()
def compute_Q_global_min_index_and_objective (self):
X_0 = self.X_0
X_v = self.flow_curve()
self.__Q_v = Q_v = self.squared_distance_function()
local_min_index_v = [i for i in range(1,len(Q_v)-1) if Q_v[i-1] > Q_v[i] and Q_v[i] < Q_v[i+1]]
Q_local_min_v = [Q_v[i] for i in local_min_index_v]
try:
Q_local_min_min_index = np.argmin(Q_local_min_v)
self.__Q_global_min_index = _Q_global_min_index = local_min_index_v[Q_local_min_min_index]
if False:
assert 1 <= _Q_global_min_index < len(Q_v)-1
self.__objective = quadratic_min(Q_v[_Q_global_min_index-1:_Q_global_min_index+2])
# Some tests show this discrepancy to be on the order of 1.0e-9
print('self.__objective - Q_v[_Q_global_min_index] = {0}'.format(self.__objective - Q_v[_Q_global_min_index]))
else:
self.__objective = Q_v[_Q_global_min_index]
except ValueError:
# If there was no local min, then use the last time value
self.__Q_global_min_index = len(Q_v)-1
self.__objective = Q_v[self.__Q_global_min_index]
def evaluate_shooting_method_objective (dynamics_context, X_0, t_max, t_delta):
return ShootingMethodObjective(dynamics_context=dynamics_context, X_0=X_0, t_max=t_max, t_delta=t_delta)()
if __name__ == '__main__':
import matplotlib.pyplot as plt
dynamics_context = HeisenbergDynamicsContext()
X_0 = HeisenbergDynamicsContext.initial_condition()
t_max = 60.0
t_delta = 0.01
smo_0 = ShootingMethodObjective(dynamics_context=dynamics_context, X_0=X_0, t_max=t_max, t_delta=t_delta)
flow_curve_0 = smo_0.flow_curve()
optimizer = library.monte_carlo.MonteCarlo(lambda x_0:evaluate_shooting_method_objective(dynamics_context, x_0, t_max, t_delta), X_0, 1.0e-12, 1.0e-5, 12345)
try:
# for i in range(10000):
for i in range(100):
optimizer.compute_next_step()
print('i = {0}, obj = {1}'.format(i, optimizer.obj_history_v[-1]))
except KeyboardInterrupt:
print('got KeyboardInterrupt -- halting optimization')
X_opt = optimizer.parameter_history_v[-1]
smo_opt = ShootingMethodObjective(dynamics_context=dynamics_context, X_0=X_opt, t_max=t_max, t_delta=t_delta)
flow_curve_opt = smo_opt.flow_curve()
print('X_0 = {0}'.format(X_0))
print('X_opt = {0}'.format(X_opt))
print('flow_curve_0[0] = {0}'.format(flow_curve_0[0]))
print('flow_curve_0[-1] = {0}'.format(flow_curve_0[-1]))
print('flow_curve_opt[0] = {0}'.format(flow_curve_opt[0]))
print('flow_curve_opt[-1] = {0}'.format(flow_curve_opt[-1]))
def plot_stuff (*, axis_v, smo, name):
flow_curve = smo.flow_curve()
axis = axis_v[0]
axis.set_title('{0} curve'.format(name))
axis.plot(flow_curve[:,0], flow_curve[:,1])
axis.plot(flow_curve[0,0], flow_curve[0,1], 'o', color='green', alpha=0.5)
axis.plot(flow_curve[smo.Q_global_min_index(),0], flow_curve[smo.Q_global_min_index(),1], 'o', color='red', alpha=0.5)
axis.set_aspect('equal')
axis = axis_v[1]
axis.set_title('squared distance')
axis.semilogy(smo.t_v(), smo.squared_distance_function())
axis.axvline(smo.t_v()[smo.Q_global_min_index()], color='green')
axis = axis_v[2]
axis.set_title('curve energy')
axis.plot(smo.t_v(), np.apply_along_axis(HeisenbergDynamicsContext.hamiltonian, 1, flow_curve))
row_count = 2
col_count = 4
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(15*col_count,15*row_count))
# axis = axis_vv[0][0]
# axis.set_title('initial curve')
# axis.plot(flow_curve_0[:,0], flow_curve_0[:,1])
# axis.set_aspect('equal')
plot_stuff(axis_v=axis_vv[0], smo=smo_0, name='initial')
plot_stuff(axis_v=axis_vv[1], smo=smo_opt, name='optimized')
axis = axis_vv[0][3]
axis.set_title('objective function history')
axis.semilogy(optimizer.obj_history_v)
# axis = axis_vv[1][0]
# axis.set_title('optimized curve')
# axis.plot(flow_curve_opt[:,0], flow_curve_opt[:,1])
# axis.set_aspect('equal')
# axis = axis_vv[1][2]
# axis.set_title('energy of optimized curve')
# axis.plot(smo_opt.t_v(), np.apply_along_axis(HeisenbergDynamicsContext.hamiltonian, 1, flow_curve_opt))
fig.tight_layout()
filename = 'shooting_method_2.png'
plt.savefig(filename)
print('wrote to file "{0}"'.format(filename))
| mit |
btabibian/scikit-learn | sklearn/cluster/birch.py | 11 | 23640 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way,
        # updating init_centroids_ and init_sq_norm_ is sufficient to
        # keep centroids_ and squared_norm_ up to date.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
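# --- Editor's addition: hedged usage sketch, not part of scikit-learn ---
# partial_fit above keeps the CF tree between calls, and calling it with no
# argument reruns only the final global clustering step over the leaf
# subclusters. The helper name below is illustrative only.
def _birch_partial_fit_example():
    rng = np.random.RandomState(0)
    brc = Birch(n_clusters=3, threshold=0.5)
    for _ in range(5):
        # Stream batches into the CF tree without rebuilding it from scratch.
        brc.partial_fit(rng.randn(100, 2))
    # With X=None, only the global clustering of the subcluster centroids runs.
    brc.partial_fit()
    return brc.predict(rng.randn(10, 2))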
| bsd-3-clause |
meduz/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 46 | 9267 | import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0,
n_jobs=-1)
with ignore_warnings(category=ConvergenceWarning):
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,
decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
wagavulin/arrow | python/pyarrow/__init__.py | 1 | 8314 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# flake8: noqa
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
try:
# This code is duplicated from setup.py to avoid a dependency on each
# other.
def parse_version(root):
from setuptools_scm import version_from_scm
import setuptools_scm.git
describe = (setuptools_scm.git.DEFAULT_DESCRIBE +
" --match 'apache-arrow-[0-9]*'")
# Strip catchall from the commandline
describe = describe.replace("--match *.*", "")
version = setuptools_scm.git.parse(root, describe)
if not version:
return version_from_scm(root)
else:
return version
import setuptools_scm
__version__ = setuptools_scm.get_version('../', parse=parse_version)
except (ImportError, LookupError):
__version__ = None
from pyarrow.lib import cpu_count, set_cpu_count
from pyarrow.lib import (null, bool_,
int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
time32, time64, timestamp, date32, date64,
float16, float32, float64,
binary, string, decimal128,
list_, struct, union, dictionary, field,
type_for_alias,
DataType, NAType,
Field,
Schema,
schema,
Array, Tensor,
array, chunked_array, column,
from_numpy_dtype,
NullArray,
NumericArray, IntegerArray, FloatingPointArray,
BooleanArray,
Int8Array, UInt8Array,
Int16Array, UInt16Array,
Int32Array, UInt32Array,
Int64Array, UInt64Array,
ListArray, UnionArray,
BinaryArray, StringArray,
FixedSizeBinaryArray,
DictionaryArray,
Date32Array, Date64Array,
TimestampArray, Time32Array, Time64Array,
Decimal128Array, StructArray,
ArrayValue, Scalar, NA,
BooleanValue,
Int8Value, Int16Value, Int32Value, Int64Value,
UInt8Value, UInt16Value, UInt32Value, UInt64Value,
HalfFloatValue, FloatValue, DoubleValue, ListValue,
BinaryValue, StringValue, FixedSizeBinaryValue,
DecimalValue,
Date32Value, Date64Value, TimestampValue)
# ARROW-1683: Remove after 0.8.0?
from pyarrow.lib import TimestampType
# Buffers, allocation
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
compress, decompress, allocate_buffer)
from pyarrow.lib import (MemoryPool, total_allocated_bytes,
set_memory_pool, default_memory_pool,
log_memory_allocations)
from pyarrow.lib import (HdfsFile, NativeFile, PythonFile,
FixedSizeBufferWriter,
BufferReader, BufferOutputStream,
OSFile, MemoryMappedFile, memory_map,
create_memory_map, have_libhdfs, have_libhdfs3,
MockOutputStream)
from pyarrow.lib import (ChunkedArray, Column, RecordBatch, Table,
concat_tables)
from pyarrow.lib import (ArrowException,
ArrowKeyError,
ArrowInvalid,
ArrowIOError,
ArrowMemoryError,
ArrowNotImplementedError,
ArrowTypeError,
ArrowSerializationError,
PlasmaObjectExists)
# Serialization
from pyarrow.lib import (deserialize_from, deserialize,
deserialize_components,
serialize, serialize_to, read_serialized,
SerializedPyObject, SerializationContext,
SerializationCallbackError,
DeserializationCallbackError)
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow.hdfs import HadoopFileSystem
import pyarrow.hdfs as hdfs
from pyarrow.ipc import (Message, MessageReader,
RecordBatchFileReader, RecordBatchFileWriter,
RecordBatchStreamReader, RecordBatchStreamWriter,
read_message, read_record_batch, read_schema,
read_tensor, write_tensor,
get_record_batch_size, get_tensor_size,
open_stream,
open_file,
serialize_pandas, deserialize_pandas)
localfs = LocalFileSystem.get_instance()
from pyarrow.serialization import (default_serialization_context,
register_default_serialization_handlers,
register_torch_serialization_handlers)
import pyarrow.types as types
# Entry point for starting the plasma store
def _plasma_store_entry_point():
"""Entry point for starting the plasma store.
This can be used by invoking e.g.
``plasma_store -s /tmp/plasma -m 1000000000``
from the command line and will start the plasma_store executable with the
given arguments.
"""
import os
import pyarrow
import sys
plasma_store_executable = os.path.join(pyarrow.__path__[0], "plasma_store")
os.execv(plasma_store_executable, sys.argv)
# ----------------------------------------------------------------------
# Deprecations
from pyarrow.util import _deprecate_api # noqa
frombuffer = _deprecate_api('frombuffer', 'py_buffer', py_buffer, '0.9.0')
# ----------------------------------------------------------------------
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
# wheels)
def get_include():
"""
Return absolute path to directory containing Arrow C++ include
headers. Similar to numpy.get_include
"""
import os
return os.path.join(os.path.dirname(__file__), 'include')
def get_libraries():
"""
Return list of library names to include in the `libraries` argument for C
or Cython extensions using pyarrow
"""
return ['arrow_python']
def get_library_dirs():
"""
Return lists of directories likely to contain Arrow C++ libraries for
linking C or Cython extensions using pyarrow
"""
import os
import sys
package_cwd = os.path.dirname(__file__)
library_dirs = [package_cwd]
if sys.platform == 'win32':
# TODO(wesm): Is this necessary, or does setuptools within a conda
# installation add Library\lib to the linker path for MSVC?
site_packages, _ = os.path.split(package_cwd)
python_base_install, _ = os.path.split(site_packages)
library_dirs.append(os.path.join(python_base_install,
'Library', 'lib'))
return library_dirs
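# --- Editor's addition: hedged usage sketch, not part of pyarrow ---
# get_include, get_libraries and get_library_dirs above are meant to be fed into
# a setuptools/Cython Extension when compiling C or C++ code against Arrow. The
# extension name and source file below are placeholders, not real pyarrow files.
def _example_arrow_extension():
    from setuptools import Extension
    return Extension(
        'myext',
        sources=['myext.cpp'],
        include_dirs=[get_include()],
        libraries=get_libraries(),
        library_dirs=get_library_dirs(),
    )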
| apache-2.0 |
ClementLancien/convertToEntrezGeneID | script/conversion/info.py | 1 | 4970 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 16:42:44 2017
@author: clancien
"""
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import os
import pandas
import logging
from logging.handlers import RotatingFileHandler
import sys
class Info():
def __init__(self):
config = ConfigParser.ConfigParser()
config.readfp(open('../../configuration.ini','r'))
self.logFile = config.get('Error', 'logFile')
self.gene2info = config.get('Download', 'gene2info')
self.info = config.get('Convert', 'Info')
        ## pandas reads the configured path (same issue for all files) as a function and not as a string, which raises an error.
        ## To bypass this error we create, for each file, a new variable that stores the path as a string.
self.filename_gene2info = str(self.gene2info)
self.filename_info = str(self.info)
        self.size=1000000 # pandas will read in chunks of 1 million lines at a time
self.index_entrez = None
self.index_tax_id = None
self.index_symbol = None
self.index_description = None
self.dataframe = list
self.logger=None
self.formatter=None
self.file_handler=None
#GeneID UniGene_cluster
self.path_exist()
self.init_log()
self.create_index()
def path_exist(self):
""" Check if dir exist if not we create the path
string = dir/subdir/subsubdir
string.rsplit('/',1)[0]
==> return dir/subdir/ """
if not os.path.isdir(self.filename_info.rsplit('/',1)[0]):
os.makedirs(self.filename_info.rsplit('/', 1)[0])
def init_log(self):
# création de l'objet logger qui va nous servir à écrire dans les logs
self.logger = logging.getLogger()
# on met le niveau du logger à DEBUG, comme ça il écrit tout
self.logger.setLevel(logging.DEBUG)
# création d'un formateur qui va ajouter le temps, le niveau
# de chaque message quand on écrira un message dans le log
self.formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
# création d'un handler qui va rediriger une écriture du log vers
# un fichier en mode 'append', avec 1 backup et une taille max de 1Mo
self.file_handler = RotatingFileHandler(self.logFile, 'a', 1000000, 1)
# on lui met le niveau sur DEBUG, on lui dit qu'il doit utiliser le formateur
# créé précédement et on ajoute ce handler au logger
self.file_handler.setLevel(logging.DEBUG)
self.file_handler.setFormatter(self.formatter)
self.logger.addHandler(self.file_handler)
def create_index(self):
with open(self.filename_gene2info , 'r') as infile:
header_line = next(infile)
header_line = header_line.split('\t')
self.index_entrez = header_line.index('GeneID')
self.index_tax_id = header_line.index('#tax_id')
self.index_symbol = header_line.index('Symbol')
self.index_description = header_line.index('description')
def get_Info(self):
# ~False = true
try:
self.dataframe=[]
for df in pandas.read_csv(self.filename_gene2info ,header=0, sep="\t", usecols=[self.index_entrez, self.index_tax_id, self.index_symbol, self.index_description], dtype='str', chunksize=self.size):
df.columns = ['TAXID', 'EGID', 'SYMBOL', 'DESCRIPTION']
df = df[['EGID','TAXID', 'SYMBOL', 'DESCRIPTION']]
#df['EGID'] = df['EGID'].astype(str)
#df['TAXID'] = df['TAXID'].astype(str)
#df['SYMBOL'] = df['SYMBOL'].astype(str)
#df['DESCRIPTION'] = df['DESCRIPTION'].astype(str)
self.dataframe.append(df)
except:
self.logger.warning("Error - info.py - getInfo - loop over file" )
self.logger.warning("Exception at the line : {}".format(sys.exc_info()[-1].tb_lineno))
self.logger.warning(sys.exc_info())
else:
try:
pandas.concat(self.dataframe).drop_duplicates(['EGID','TAXID', 'SYMBOL', 'DESCRIPTION'], keep='first').to_csv(self.filename_info, header=None, index=None, sep='\t', mode='w')
except:
self.logger.warning("Error - info.py - getInfo - write File")
self.logger.warning("Exception at the line : {}".format(sys.exc_info()[-1].tb_lineno))
self.logger.warning(sys.exc_info())
if __name__ == '__main__':
Info().get_Info()
| mit |
pllim/astropy | astropy/utils/compat/optional_deps.py | 2 | 1548 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
import warnings
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = ['bleach', 'bottleneck', 'bs4', 'bz2', 'h5py', 'html5lib',
'IPython', 'jplephem', 'lxml', 'matplotlib', 'mpmath',
'pandas', 'PIL', 'pytz', 'scipy', 'skyfield',
'sortedcontainers', 'lzma']
_formerly_optional_deps = ['yaml'] # for backward compatibility
_deps = {k.upper(): k for k in _optional_deps + _formerly_optional_deps}
# Any subpackages that have different import behavior:
_deps['PLT'] = 'matplotlib.pyplot'
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
        if module_name == "YAML":
            # local import so the exception class is only needed for the
            # deprecated HAS_YAML flag
            from astropy.utils.exceptions import AstropyDeprecationWarning
            warnings.warn(
                "PyYaml is now a strict dependency. HAS_YAML is deprecated as "
                "of v5.0 and will be removed in a subsequent version.",
                category=AstropyDeprecationWarning)
try:
importlib.import_module(_deps[module_name])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
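# Illustrative usage sketch (not part of this module): downstream code typically
# checks the lazily computed HAS_* flags before touching an optional dependency.
# HAS_SCIPY below is a real flag exposed here; the guarded import is only an example.
#
#     from astropy.utils.compat.optional_deps import HAS_SCIPY
#
#     if HAS_SCIPY:
#         from scipy import optimize
#
# Accessing HAS_SCIPY triggers the module-level __getattr__ above (PEP 562), which
# lazily attempts ``importlib.import_module('scipy')`` and returns True or False.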
| bsd-3-clause |
dpaiton/OpenPV | pv-core/analysis/python/plot_time_stability_all_k.py | 1 | 18262 | """
Plots the time stability
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
if len(sys.argv) < 5:
print "usage: time_stability filename on, filename off, filename-on post, filename-off post"
print len(sys.argv)
sys.exit()
w = rw.PVReadWeights(sys.argv[1])
wOff = rw.PVReadWeights(sys.argv[2])
space = 1
d = np.zeros((4,4))
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 10
marginstart = margin
marginend = nx - margin
acount = 0
patchposition = []
def format_coord(x, y):
col = int(x+0.5)
row = int(y+0.5)
x2 = (x / 16.0)
y2 = (y / 16.0)
x = (x / 4.0)
y = (y / 4.0)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = P[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4d, y=%1.4d, x2=%1.4d, y2=%1.4d'%(int(x), int(y), int(x2), int(y2))
k = 16
for ko in range(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
poff = wOff.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
acount = acount + 1
if kxOn == margin + 1 and kyOn == margin + 1:
don = p
doff = poff
d = np.append(don, doff)
else:
don = p
doff = poff
e = np.append(don, doff)
d = np.vstack((d,e))
wd = sp.whiten(d)
result = sp.kmeans2(wd, k)
cluster = result[1]
nx_im = 2 * (nxp + space) + space
ny_im = k * (nyp + space) + space
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
nx_im2 = nx * (nxp)
ny_im2 = ny * (nyp)
im2 = np.zeros((nx_im2, ny_im2))
im2[:,:] = (w.max - w.min) / 2.
nx_im3 = nx * (nxp)
ny_im3 = ny * (nyp)
im3 = np.zeros((nx_im3, ny_im3))
im3[:,:] = (w.max - w.min) / 2.
b = result[0]
c = np.hsplit(b, 2)
con = c[0]
coff = c[1]
for i in range(k):
d = con[i].reshape(nxp, nyp)
numrows, numcols = d.shape
x = space + (space + nxp) * (i % k)
y = space + (space + nyp) * (i / k)
im[y:y+nyp, x:x+nxp] = d
for i in range(k):
e = coff[i].reshape(nxp, nyp)
numrows, numcols = e.shape
i = i + k
x = space + (space + nxp) * (i % k)
y = space + (space + nyp) * (i / k)
im[y:y+nyp, x:x+nxp] = e
kcount1 = 0.0
kcount2 = 0.0
kcount3 = 0.0
kcount4 = 0.0
kcount5 = 0.0
kcount6 = 0.0
kcount7 = 0.0
kcount8 = 0.0
kcount9 = 0.0
kcount10 = 0.0
kcount11 = 0.0
kcount12 = 0.0
kcount13 = 0.0
kcount14= 0.0
kcount15 = 0.0
kcount16 = 0.0
for i in range(acount):
if cluster[i] == 0:
kcount1 = kcount1 + 1
if cluster[i] == 1:
kcount2 = kcount2 + 1
if cluster[i] == 2:
kcount3 = kcount3 + 1
if cluster[i] == 3:
kcount4 = kcount4 + 1
if cluster[i] == 4:
kcount5 = kcount5 + 1
if cluster[i] == 5:
kcount6 = kcount6 + 1
if cluster[i] == 6:
kcount7 = kcount7 + 1
if cluster[i] == 7:
kcount8 = kcount8 + 1
if cluster[i] == 8:
kcount9 = kcount9 + 1
if cluster[i] == 9:
kcount10 = kcount10 + 1
if cluster[i] == 10:
kcount11 = kcount11 + 1
if cluster[i] == 11:
kcount12 = kcount12 + 1
if cluster[i] == 12:
kcount13 = kcount13 + 1
if cluster[i] == 13:
kcount14 = kcount14 + 1
if cluster[i] == 14:
kcount15 = kcount15 + 1
if cluster[i] == 15:
kcount16 = kcount16 + 1
kcountper1 = kcount1 / acount
kcountper2 = kcount2 / acount
kcountper3 = kcount3 / acount
kcountper4 = kcount4 / acount
kcountper5 = kcount5 / acount
kcountper6 = kcount6 / acount
kcountper7 = kcount7 / acount
kcountper8 = kcount8 / acount
kcountper9 = kcount9 / acount
kcountper10 = kcount10 / acount
kcountper11 = kcount11 / acount
kcountper12 = kcount12 / acount
kcountper13 = kcount13 / acount
kcountper14 = kcount14 / acount
kcountper15 = kcount15 / acount
kcountper16 = kcount16 / acount
"""
fig = plt.figure()
ax = fig.add_subplot(111)
textx = (-7/16.0) * k
texty = (10/16.0) * k
ax.set_title('On and Off K-means')
ax.set_axis_off()
ax.text(textx, texty,'ON\n\nOff', fontsize='xx-large', rotation='horizontal')
ax.text( -5, 12, "Percent %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" %(kcountper1, kcountper2, kcountper3, kcountper4, kcountper5, kcountper6, kcountper7, kcountper8, kcountper9, kcountper10, kcountper11, kcountper12, kcountper13, kcountper14, kcountper15, kcountper16), fontsize='large', rotation='horizontal')
ax.text(-4, 14, "Patch 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", fontsize='x-large', rotation='horizontal')
ax.imshow(im, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
plt.show()
"""
##########
# Choose K-cluster
##########
#feature1 = input('Please which k-cluster to compare:')
feature1 = 1
##########
# Find Position of Patches in K-cluster[x]
##########
total = []
logtotal = []
def k_stability_analysis(k, forwardjump):
w = rw.PVReadWeights(sys.argv[1])
feature = k - 1
count = 0
d = np.zeros((nxp,nyp))
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = p
e = e.reshape(nxp, nyp)
numrows, numcols = e.shape
count = count + 1
patpos = w.file.tell()
patchposition.append(patpos)
else:
e = d
count = count + 1
else:
e = d
else:
e = d
x = (nxp) * (ko % nx)
y = ( nyp) * (ko / nx)
im2[y:y+nyp, x:x+nxp] = e
##########
# Find Valuse of K-cluster[x] Patches
##########
w = rw.PVReadWeights(sys.argv[3])
wOff = rw.PVReadWeights(sys.argv[4])
w.rewind()
wOff.rewind()
patpla = patchposition
lenpat = len(patpla)
number = w.numPatches
count = 0
exp = []
expOff = []
exppn = []
exppnOff = []
body = w.recSize + 4
hs = w.headerSize
filesize = os.path.getsize(sys.argv[3])
bint = filesize / body
bint = bint - forwardjump - 1
if forwardjump == 0:
        pass  # nothing to skip when no forward jump was requested
else:
leap = ((body * forwardjump) + (100 * forwardjump))
w.file.seek(leap, os.SEEK_CUR)
countso = 0
for i in range(bint):
countso += 1
print countso
if i == 0:
for j in range(lenpat):
if j == 0:
go = patpla[0] - hs - 20
w.file.seek(go, os.SEEK_CUR)
wOff.file.seek(go, os.SEEK_CUR)
p = w.next_patch()
pOff = wOff.next_patch()
if len(p) == 0:
print"STOPPEP SUPER EARLY"
sys.exit()
don = p
doff = pOff
allpat = 0
d = np.append(don, doff)
p = w.normalize(d)
pn = p
pn = np.reshape(np.matrix(pn),(1,32))
p = np.reshape(np.matrix(p),(32,1))
pm = pn * p
exppn = np.append(exppn, pn)
exp = np.append(exp,pm)
else:
pospost = patpla[j - 1]
poscur = patpla[j]
jump = poscur - pospost - 20
w.file.seek(jump, os.SEEK_CUR)
wOff.file.seek(jump, os.SEEK_CUR)
p = w.next_patch()
pOff = wOff.next_patch()
if len(pOff) == 0:
print"STOPPED EARLY"
sys.exit()
don = p
doff = pOff
d = np.append(don, doff)
p = w.normalize(d)
pn = p
pn = np.reshape(np.matrix(pn),(1,32))
p = np.reshape(np.matrix(p),(32,1))
pm = pn * p
exppn = np.append(exppn, pn)
exp = np.append(exp,pm)
#print "Ch-Ch-Changes", exppn
else:
count = 0
prejump = body - patpla[lenpat-1] + hs
w.file.seek(prejump, os.SEEK_CUR)
wOff.file.seek(prejump, os.SEEK_CUR)
for j in range(lenpat):
if j == 0:
go = patpla[0] - 4 - 20
w.file.seek(go, os.SEEK_CUR)
wOff.file.seek(go, os.SEEK_CUR)
p = w.next_patch()
pOff = wOff.next_patch()
test = p
if len(test) == 0:
print "stop"
                        raw_input('Press Enter to Continue')  # raw_input (not input) so an empty line just pauses
sys.exit()
don = p
doff = pOff
d = np.append(don, doff)
p = w.normalize(d)
p = np.reshape(np.matrix(p),(32,1))
j1 = 0
j2 = 32
pm = np.matrix(exppn[j1:j2]) * p
exp = np.append(exp,pm)
count += 1
else:
pospost = patpla[j - 1]
poscur = patpla[j]
jump = poscur - pospost - 20
w.file.seek(jump, os.SEEK_CUR)
wOff.file.seek(jump, os.SEEK_CUR)
p = w.next_patch()
pOff = wOff.next_patch()
test = pOff
if len(test) == 0:
print "stop"
                        raw_input('Press Enter to Continue')  # raw_input (not input) so an empty line just pauses
sys.exit()
don = p
doff = pOff
d = np.append(don, doff)
p = w.normalize(d)
p = np.reshape(np.matrix(p),(32,1))
j1 = 32 * j
j2 = 32 * (j +1)
pm = np.matrix(exppn[j1:j2]) * p
exp = np.append(exp,pm)
count += 1
##########
# Find Average of K-cluster[x] Weights
##########
thenumber = lenpat
thenumberf = float(thenumber)
patpla = exp
lenpat = len(patpla)
howlong = lenpat / thenumber
total = []
logtotal = []
for i in range(thenumber):
subtotal = []
logsubtotal = []
for j in range(howlong):
if i == 0:
value = patpla[i + (thenumber * j)]
total = np.append(total, value)
logvalue = patpla[i + (thenumber * j)]
logvalue = math.log10(logvalue)
logtotal = np.append(logtotal, logvalue)
else:
value = patpla[i + (thenumber * j)]
subtotal = np.append(subtotal, value)
logvalue = patpla[i + (thenumber * j)]
logvalue = math.log10(logvalue)
logsubtotal = np.append(logsubtotal, logvalue)
if i > 0:
total = total + subtotal
if i > 0:
logtotal = logtotal + logsubtotal
total = total / thenumberf
logtotal = logtotal / thenumberf
global total1
global total2
global total3
global total4
global total5
global total6
global total7
global total8
global total9
global total10
global total11
global total12
global total13
global total14
global total15
global total16
global logtotal1
global logtotal2
global logtotal3
global logtotal4
global logtotal5
global logtotal6
global logtotal7
global logtotal8
global logtotal9
global logtotal10
global logtotal11
global logtotal12
global logtotal13
global logtotal14
global logtotal15
global logtotal16
if feature == 0:
total1 = [0.0]
total2 = [0.0]
total3 = [0.0]
total4 = [0.0]
total5 = [0.0]
total6 = [0.0]
total7 = [0.0]
total8 = [0.0]
total9 = [0.0]
total10 = [0.0]
total11 = [0.0]
total12 = [0.0]
total13 = [0.0]
total14 = [0.0]
total15 = [0.0]
total16 = [0.0]
logtotal1 = [0.0]
logtotal2 = [0.0]
logtotal3 = [0.0]
logtotal4 = [0.0]
logtotal5 = [0.0]
logtotal6 = [0.0]
logtotal7 = [0.0]
logtotal8 = [0.0]
logtotal9 = [0.0]
logtotal10 = [0.0]
logtotal11 = [0.0]
logtotal12 = [0.0]
logtotal13 = [0.0]
logtotal14 = [0.0]
logtotal15 = [0.0]
logtotal16 = [0.0]
if feature == 0:
total1 = total
logtotal1 = logtotal
if feature == 1:
total2 = total
logtotal2 = logtotal
if feature == 2:
total3 = total
logtotal3 = logtotal
if feature == 3:
total4 = total
logtotal4 = logtotal
if feature == 4:
total5 = total
logtotal5 = logtotal
if feature == 5:
total6 = total
logtotal6 = logtotal
if feature == 6:
total7 = total
logtotal7 = logtotal
if feature == 7:
total8 = total
logtotal8 = logtotal
if feature == 8:
total9 = total
logtotal9 = logtotal
if feature == 9:
total10 = total
logtotal10 = logtotal
if feature == 10:
total11 = total
logtotal11 = logtotal
if feature == 11:
total12 = total
logtotal12 = logtotal
if feature == 12:
total13 = total
logtotal13 = logtotal
if feature == 13:
total14 = total
logtotal14 = logtotal
if feature == 14:
total15 = total
logtotal15 = logtotal
if feature == 15:
total16 = total
logtotal16 = logtotal
return
w = rw.PVReadWeights(sys.argv[3])
body = w.recSize + 4
hs = w.headerSize
filesize = os.path.getsize(sys.argv[3])
bint = filesize / body
print
print "Number of steps = ", bint
forwardjump = input('How many steps forward:')
count = 0
#for i in range(16):
# i = i + 1
# i = feature1
# k_stability_analysis(i, forwardjump)
# count += 1
# print count
for i in range(1):
i = feature1
k_stability_analysis(i, forwardjump)
count += 1
print count
if len(total1) == 0:
total1 = .5
if len(total2) == 0:
total2 = .5
if len(total3) == 0:
total3 = .5
if len(total4) == 0:
total4 = .5
if len(total5) == 0:
total5 = .5
if len(total6) == 0:
total6 = .5
if len(total7) == 0:
total7 = .5
if len(total8) == 0:
total8 = .5
if len(total9) == 0:
total9 = .5
if len(total10) == 0:
total10 = .5
if len(total11) == 0:
total11 = .5
if len(total12) == 0:
total12 = .5
if len(total13) == 0:
total13 = .5
if len(total14) == 0:
total14 = .5
if len(total15) == 0:
total15 = .5
if len(total16) == 0:
total16 = .5
##########
# Plot Time Stability Curve
##########
fig = plt.figure()
ax = fig.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111, axisbg='darkslategray')
#fig3 = plt.figure()
#ax3 = fig3.add_subplot(111, axisbg='darkslategray')
textx = (-7/16.0) * k
texty = (10/16.0) * k
ax.set_title('On and Off K-means')
ax.set_axis_off()
ax.text(textx, texty,'ON\n\nOff', fontsize='xx-large', rotation='horizontal')
ax.text( -5, 12, "Percent %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" %(kcountper1, kcountper2, kcountper3, kcountper4, kcountper5, kcountper6, kcountper7, kcountper8, kcountper9, kcountper10, kcountper11, kcountper12, kcountper13, kcountper14, kcountper15, kcountper16), fontsize='large', rotation='horizontal')
ax.text(-4, 14, "Patch 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", fontsize='x-large', rotation='horizontal')
ax.imshow(im, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
ax2.plot(np.arange(len(total1)), total1, '-o', color='y')
ax2.plot(np.arange(len(logtotal1)), logtotal1, '-o', color='y')
ax2.plot(np.arange(len(total2)), total2, '-o', color='r')
ax2.plot(np.arange(len(logtotal2)), logtotal2, '-o', color='r')
ax2.plot(np.arange(len(total3)), total3, '-o', color='b')
ax2.plot(np.arange(len(logtotal3)), logtotal3, '-o', color='b')
ax2.plot(np.arange(len(total4)), total4, '-o', color='c')
ax2.plot(np.arange(len(logtotal4)), logtotal4, '-o', color='c')
ax2.plot(np.arange(len(total5)), total5, '-o', color='m')
ax2.plot(np.arange(len(logtotal5)), logtotal5, '-o', color='m')
ax2.plot(np.arange(len(total6)), total6, '-o', color='k')
ax2.plot(np.arange(len(logtotal6)), logtotal6, '-o', color='k')
ax2.plot(np.arange(len(total7)), total7, '-o', color='w')
ax2.plot(np.arange(len(logtotal7)), logtotal7, '-o', color='w')
ax2.plot(np.arange(len(total8)), total8, '-o', color='g')
ax2.plot(np.arange(len(logtotal8)), logtotal8, '-o', color='g')
#print "yellow = 1, 9"
#print "red = 2, 10"
#print "blue = 3, 11"
#print "cyan = 4, 12"
#print "magenta = 5, 13"
#print "black = 6, 14"
#print "white = 7, 15"
#print "green = 8, 16"
#ax3.plot(np.arange(len(total9)), total9, '-o', color='y')
#ax3.plot(np.arange(len(logtotal9)), logtotal9, '-o', color='y')
#ax3.plot(np.arange(len(total10)), total10, '-o', color='r')
#ax3.plot(np.arange(len(logtotal10)), logtotal10, '-o', color='r')
#ax3.plot(np.arange(len(total11)), total11, '-o', color='b')
#ax3.plot(np.arange(len(logtotal11)), logtotal11, '-o', color='b')
#ax3.plot(np.arange(len(total12)), total12, '-o', color='c')
#ax3.plot(np.arange(len(logtotal12)), logtotal12, '-o', color='c')
#ax3.plot(np.arange(len(total13)), total13, '-o', color='m')
#ax3.plot(np.arange(len(logtotal13)), logtotal13, '-o', color='m')
#ax3.plot(np.arange(len(total14)), total14, '-o', color='k')
#ax3.plot(np.arange(len(logtotal14)), logtotal14, '-o', color='k')
#ax3.plot(np.arange(len(total15)), total15, '-o', color='w')
#ax3.plot(np.arange(len(logtotal15)), logtotal15, '-o', color='w')
#ax3.plot(np.arange(len(total16)), total16, '-o', color='g')
#ax3.plot(np.arange(len(logtotal16)), logtotal16, '-o', color='g')
ax2.set_xlabel('Time')
ax2.set_ylabel('Avg Correlation')
ax2.set_title('Time Stability k 1')
ax2.set_xlim(0, len(total1))
ax2.grid(True)
#ax3.set_xlabel('Time')
#ax3.set_ylabel('Avg Correlation')
#ax3.set_title('Time Stability k 9-16')
#ax3.set_xlim(0, len(total1))
#ax3.grid(True)
plt.show()
| epl-1.0 |
walterreade/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 58 | 17158 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
DGrady/pandas | pandas/tests/frame/test_subclass.py | 15 | 9524 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
import numpy as np
from pandas import DataFrame, Series, MultiIndex, Panel
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSubclassing(TestData):
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
# In reference to PR 9632
class CustomSeries(Series):
@property
def _constructor(self):
return CustomSeries
def custom_series_function(self):
return 'OK'
class CustomDataFrame(DataFrame):
"""
Subclasses pandas DF, fills DF with simulation results, adds some
custom plotting functions.
"""
def __init__(self, *args, **kw):
super(CustomDataFrame, self).__init__(*args, **kw)
@property
def _constructor(self):
return CustomDataFrame
_constructor_sliced = CustomSeries
def custom_frame_function(self):
return 'OK'
data = {'col1': range(10),
'col2': range(10)}
cdf = CustomDataFrame(data)
# Did we get back our own DF class?
assert isinstance(cdf, CustomDataFrame)
# Do we get back our own Series class after selecting a column?
cdf_series = cdf.col1
assert isinstance(cdf_series, CustomSeries)
assert cdf_series.custom_series_function() == 'OK'
# Do we get back our own DF class after slicing row-wise?
cdf_rows = cdf[1:5]
assert isinstance(cdf_rows, CustomDataFrame)
assert cdf_rows.custom_frame_function() == 'OK'
# Make sure sliced part of multi-index frame is custom class
mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')])
cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi['A'], CustomDataFrame)
mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])
cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi2['A'], CustomSeries)
def test_dataframe_metadata(self):
df = tm.SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
index=['a', 'b', 'c'])
df.testattr = 'XXX'
assert df.testattr == 'XXX'
assert df[['X']].testattr == 'XXX'
assert df.loc[['a', 'b'], :].testattr == 'XXX'
assert df.iloc[[0, 1], :].testattr == 'XXX'
# see gh-9776
assert df.iloc[0:1, :].testattr == 'XXX'
# see gh-10553
unpickled = tm.round_trip_pickle(df)
tm.assert_frame_equal(df, unpickled)
assert df._metadata == unpickled._metadata
assert df.testattr == unpickled.testattr
def test_indexing_sliced(self):
# GH 11559
df = tm.SubclassedDataFrame({'X': [1, 2, 3],
'Y': [4, 5, 6],
'Z': [7, 8, 9]},
index=['a', 'b', 'c'])
res = df.loc[:, 'X']
exp = tm.SubclassedSeries([1, 2, 3], index=list('abc'), name='X')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[:, 1]
exp = tm.SubclassedSeries([4, 5, 6], index=list('abc'), name='Y')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc[:, 'Z']
exp = tm.SubclassedSeries([7, 8, 9], index=list('abc'), name='Z')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc['a', :]
exp = tm.SubclassedSeries([1, 4, 7], index=list('XYZ'), name='a')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[1, :]
exp = tm.SubclassedSeries([2, 5, 8], index=list('XYZ'), name='b')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc['c', :]
exp = tm.SubclassedSeries([3, 6, 9], index=list('XYZ'), name='c')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
def test_to_panel_expanddim(self):
# GH 9762
with catch_warnings(record=True):
class SubclassedFrame(DataFrame):
@property
def _constructor_expanddim(self):
return SubclassedPanel
class SubclassedPanel(Panel):
pass
index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])
df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index)
result = df.to_panel()
assert isinstance(result, SubclassedPanel)
expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],
items=['X', 'Y'], major_axis=[0],
minor_axis=[0, 1, 2],
dtype='int64')
tm.assert_panel_equal(result, expected)
def test_subclass_attr_err_propagation(self):
# GH 11808
class A(DataFrame):
@property
def bar(self):
return self.i_dont_exist
with tm.assert_raises_regex(AttributeError, '.*i_dont_exist.*'):
A().bar
def test_subclass_align(self):
# GH 12983
df1 = tm.SubclassedDataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
df2 = tm.SubclassedDataFrame({'c': [1, 2, 4],
'd': [1, 2, 4]}, index=list('ABD'))
res1, res2 = df1.align(df2, axis=0)
exp1 = tm.SubclassedDataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = tm.SubclassedDataFrame({'c': [1, 2, np.nan, 4, np.nan],
'd': [1, 2, np.nan, 4, np.nan]},
index=list('ABCDE'))
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp2)
res1, res2 = df1.a.align(df2.c)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp1.a)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2.c)
def test_subclass_align_combinations(self):
# GH 12983
df = tm.SubclassedDataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = tm.SubclassedSeries([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
# name is lost when
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp2)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp1)
def test_subclass_iterrows(self):
# GH 13977
df = tm.SubclassedDataFrame({'a': [1]})
for i, row in df.iterrows():
assert isinstance(row, tm.SubclassedSeries)
tm.assert_series_equal(row, df.loc[i])
def test_subclass_sparse_slice(self):
rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
ssdf = tm.SubclassedSparseDataFrame(rows)
ssdf.testattr = "testattr"
tm.assert_sp_frame_equal(ssdf.loc[:2],
tm.SubclassedSparseDataFrame(rows[:3]))
tm.assert_sp_frame_equal(ssdf.iloc[:2],
tm.SubclassedSparseDataFrame(rows[:2]))
tm.assert_sp_frame_equal(ssdf[:2],
tm.SubclassedSparseDataFrame(rows[:2]))
assert ssdf.loc[:2].testattr == "testattr"
assert ssdf.iloc[:2].testattr == "testattr"
assert ssdf[:2].testattr == "testattr"
tm.assert_sp_series_equal(ssdf.loc[1],
tm.SubclassedSparseSeries(rows[1]),
check_names=False)
tm.assert_sp_series_equal(ssdf.iloc[1],
tm.SubclassedSparseSeries(rows[1]),
check_names=False)
def test_subclass_sparse_transpose(self):
ossdf = tm.SubclassedSparseDataFrame([[1, 2, 3],
[4, 5, 6]])
essdf = tm.SubclassedSparseDataFrame([[1, 4],
[2, 5],
[3, 6]])
tm.assert_sp_frame_equal(ossdf.T, essdf)
| bsd-3-clause |
GunoH/intellij-community | python/helpers/pydev/pydevd.py | 9 | 90108 | '''
Entry point module (keep at root):
This module starts the debugger.
'''
import os
import sys
from contextlib import contextmanager
import weakref
# allow the debugger to work when Python is run in isolated mode
here = os.path.dirname(os.path.abspath(__file__))
if here not in sys.path:
sys.path.insert(0, here)
from _pydevd_bundle.pydevd_collect_try_except_info import collect_return_info
if sys.version_info[:2] < (2, 6):
raise RuntimeError('The PyDev.Debugger requires Python 2.6 onwards to be run. If you need to use an older Python version, use an older version of the debugger.')
import itertools
import atexit
import os
import traceback
from functools import partial
from collections import defaultdict
from _pydevd_bundle.pydevd_constants import IS_JYTH_LESS25, IS_PYCHARM, get_thread_id, get_current_thread_id, \
dict_keys, dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, xrange, \
clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE, SHOW_DEBUG_INFO_ENV, IS_PY34_OR_GREATER, IS_PY36_OR_GREATER, \
IS_PY2, NULL, NO_FTRACE, dummy_excepthook, IS_CPYTHON, GOTO_HAS_RESPONSE
from _pydev_bundle import fix_getpass
from _pydev_bundle import pydev_imports, pydev_log
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_io, pydevd_vm_type
import pydevd_tracing
from _pydevd_bundle import pydevd_utils
from _pydevd_bundle import pydevd_vars
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, set_fallback_excepthook, disable_excepthook
from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO, CMD_STEP_OVER, \
CMD_STEP_RETURN, CMD_STEP_INTO_MY_CODE, CMD_THREAD_SUSPEND, CMD_RUN_TO_LINE, \
CMD_ADD_EXCEPTION_BREAK, CMD_SMART_STEP_INTO, InternalConsoleExec, NetCommandFactory, \
PyDBDaemonThread, _queue, ReaderThread, GetGlobalDebugger, get_global_debugger, \
set_global_debugger, WriterThread, pydevd_log, \
start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace, \
InternalSendCurrExceptionTraceProceeded, CommunicationRole, run_as_pydevd_daemon_thread
from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, remove_exception_from_frame
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from _pydevd_bundle.pydevd_trace_dispatch import (
trace_dispatch as _trace_dispatch, global_cache_skips, global_cache_frame_skips, show_tracing_warning)
from _pydevd_frame_eval.pydevd_frame_eval_main import (
frame_eval_func, dummy_trace_dispatch, show_frame_eval_warning)
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_utils import save_main_module
from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_message, cur_time
from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads, wrap_asyncio
from pydevd_file_utils import get_fullname, rPath, get_package_dir
import pydev_ipython # @UnusedImport
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
get_file_type = DONT_TRACE.get
__version_info__ = (1, 4, 0)
__version_info_str__ = []
for v in __version_info__:
__version_info_str__.append(str(v))
__version__ = '.'.join(__version_info_str__)
#IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
def install_breakpointhook(pydevd_breakpointhook=None):
if pydevd_breakpointhook is None:
from _pydevd_bundle.pydevd_breakpointhook import breakpointhook
pydevd_breakpointhook = breakpointhook
if sys.version_info >= (3, 7):
# There are some choices on how to provide the breakpoint hook. Namely, we can provide a
# PYTHONBREAKPOINT which provides the import path for a method to be executed or we
# can override sys.breakpointhook.
# pydevd overrides sys.breakpointhook instead of providing an environment variable because
# it's possible that the debugger starts the user program but is not available in the
# PYTHONPATH (and would thus fail to be imported if PYTHONBREAKPOINT was set to pydevd.settrace).
# Note that the implementation still takes PYTHONBREAKPOINT in account (so, if it was provided
# by someone else, it'd still work).
sys.breakpointhook = pydevd_breakpointhook
# Install the breakpoint hook at import time.
install_breakpointhook()
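# Illustrative sketch (standard PEP 553 semantics, not a pydevd-specific API): once
# the hook above is installed, a plain builtin call in user code such as
#
#     def compute(x):
#         breakpoint()  # dispatches to sys.breakpointhook, i.e. pydevd's hook
#         return x * 2
#
# suspends execution in the debugger on Python 3.7+. A PYTHONBREAKPOINT value set
# explicitly by the user is still honored, as noted in the comments above.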
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from _pydevd_bundle.pydevd_plugin_utils import PluginManager
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
original_excepthook = sys.__excepthook__
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
connected = False
bufferStdOutToServer = False
bufferStdErrToServer = False
remote = False
forked = False
file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self)
self._py_db_command_thread_event = py_db._py_db_command_thread_event
self.py_db = py_db
self.setName('pydevd.CommandThread')
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
# Delay a bit this initialization to wait for the main program to start.
time.sleep(0.3)
if self.killReceived:
return
try:
while not self.killReceived:
try:
self.py_db.process_internal_commands()
except:
pydevd_log(0, 'Finishing debug communication...(2)')
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.3)
except:
pydev_log.debug(sys.exc_info()[0])
# only got this error in interpreter shutdown
# pydevd_log(0, 'Finishing debug communication...(3)')
#=======================================================================================================================
# CheckOutputThread
# Non-daemon thread: guarantees that all data is written even if program is finished
#=======================================================================================================================
class CheckOutputThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self)
self.py_db = py_db
self.setName('pydevd.CheckAliveThread')
self.daemon = False
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
while not self.killReceived:
time.sleep(0.3)
if not self.py_db.has_threads_alive() and self.py_db.writer.empty():
try:
pydev_log.debug("No threads alive, finishing debug session")
self.py_db.finish_debugging_session()
kill_all_pydev_threads()
except:
traceback.print_exc()
self.wait_pydb_threads_to_finish()
self.killReceived = True
self.py_db.check_output_redirect()
def wait_pydb_threads_to_finish(self, timeout=0.5):
pydev_log.debug("Waiting for pydb daemon threads to finish")
pydb_daemon_threads = self.created_pydb_daemon_threads
started_at = time.time()
while time.time() < started_at + timeout:
if len(pydb_daemon_threads) == 1 and pydb_daemon_threads.get(self, None):
return
time.sleep(0.01)
pydev_log.debug("The following pydb threads may not finished correctly: %s"
% ', '.join([t.getName() for t in pydb_daemon_threads if t is not self]))
def do_kill_pydev_thread(self):
self.killReceived = True
class TrackedLock(object):
"""The lock that tracks if it has been acquired by the current thread
"""
def __init__(self):
self._lock = thread.allocate_lock()
# thread-local storage
self._tls = threading.local()
self._tls.is_lock_acquired = False
def acquire(self):
self._lock.acquire()
self._tls.is_lock_acquired = True
def release(self):
self._lock.release()
self._tls.is_lock_acquired = False
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
    def is_acquired_by_current_thread(self):
        # threads that never acquired the lock have no thread-local flag yet
        return getattr(self._tls, 'is_lock_acquired', False)
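# Illustrative usage sketch for TrackedLock (an assumption about intended use, not a
# documented pydevd API):
#
#     lock = TrackedLock()
#     with lock:
#         assert lock.is_acquired_by_current_thread()
#     assert not lock.is_acquired_by_current_thread()
#
# The flag lives in thread-local storage, so each thread sees only its own
# acquisition state.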
class AbstractSingleNotificationBehavior(object):
'''
The basic usage should be:
# Increment the request time for the suspend.
single_notification_behavior.increment_suspend_time()
# Notify that this is a pause request (when a pause, not a breakpoint).
single_notification_behavior.on_pause()
# Mark threads to be suspended.
set_suspend(...)
# On do_wait_suspend, use notify_thread_suspended:
def do_wait_suspend(...):
with single_notification_behavior.notify_thread_suspended(thread_id):
...
'''
__slots__ = [
'_last_resume_notification_time',
'_last_suspend_notification_time',
'_lock',
'_next_request_time',
'_suspend_time_request',
'_suspended_thread_ids',
'_pause_requested',
]
NOTIFY_OF_PAUSE_TIMEOUT = .5
def __init__(self):
self._next_request_time = partial(next, itertools.count())
self._last_suspend_notification_time = -1
self._last_resume_notification_time = -1
self._suspend_time_request = self._next_request_time()
self._lock = thread.allocate_lock()
self._suspended_thread_ids = set()
self._pause_requested = False
def send_suspend_notification(self, thread_id, stop_reason):
raise AssertionError('abstract: subclasses must override.')
def send_resume_notification(self, thread_id):
raise AssertionError('abstract: subclasses must override.')
def increment_suspend_time(self):
with self._lock:
self._suspend_time_request = self._next_request_time()
def on_pause(self):
# Upon a pause, we should force sending new suspend notifications
# if no notification is sent after some time and there's some thread already stopped.
with self._lock:
self._pause_requested = True
global_suspend_time = self._suspend_time_request
run_as_pydevd_daemon_thread(self._notify_after_timeout, global_suspend_time)
def _notify_after_timeout(self, global_suspend_time):
time.sleep(self.NOTIFY_OF_PAUSE_TIMEOUT)
with self._lock:
if self._suspended_thread_ids:
if global_suspend_time > self._last_suspend_notification_time:
self._last_suspend_notification_time = global_suspend_time
# Notify about any thread which is currently suspended.
self.send_suspend_notification(next(iter(self._suspended_thread_ids)), CMD_THREAD_SUSPEND)
@contextmanager
def notify_thread_suspended(self, thread_id, stop_reason):
with self._lock:
pause_requested = self._pause_requested
if pause_requested:
# When a suspend notification is sent, reset the pause flag.
self._pause_requested = False
self._suspended_thread_ids.add(thread_id)
# CMD_THREAD_SUSPEND should always be a side-effect of a break, so, only
# issue for a CMD_THREAD_SUSPEND if a pause is pending.
if stop_reason != CMD_THREAD_SUSPEND or pause_requested:
if self._suspend_time_request > self._last_suspend_notification_time:
self._last_suspend_notification_time = self._suspend_time_request
self.send_suspend_notification(thread_id, stop_reason)
try:
yield # At this point the thread must be actually suspended.
finally:
# on resume (step, continue all):
with self._lock:
self._suspended_thread_ids.remove(thread_id)
if self._last_resume_notification_time < self._last_suspend_notification_time:
self._last_resume_notification_time = self._last_suspend_notification_time
self.send_resume_notification(thread_id)
class ThreadsSuspendedSingleNotification(AbstractSingleNotificationBehavior):
__slots__ = AbstractSingleNotificationBehavior.__slots__ + [
'multi_threads_single_notification', '_py_db']
def __init__(self, py_db):
AbstractSingleNotificationBehavior.__init__(self)
# If True, pydevd will send a single notification when all threads are suspended/resumed.
self.multi_threads_single_notification = False
self._py_db = weakref.ref(py_db)
@overrides(AbstractSingleNotificationBehavior.send_resume_notification)
def send_resume_notification(self, thread_id):
py_db = self._py_db()
if py_db is not None:
py_db.writer.add_command(py_db.cmd_factory.make_thread_resume_single_notification(thread_id))
@overrides(AbstractSingleNotificationBehavior.send_suspend_notification)
def send_suspend_notification(self, thread_id, stop_reason):
py_db = self._py_db()
if py_db is not None:
py_db.writer.add_command(py_db.cmd_factory.make_thread_suspend_single_notification(thread_id, stop_reason))
@overrides(AbstractSingleNotificationBehavior.notify_thread_suspended)
@contextmanager
def notify_thread_suspended(self, thread_id, stop_reason):
if self.multi_threads_single_notification:
with AbstractSingleNotificationBehavior.notify_thread_suspended(self, thread_id, stop_reason):
yield
else:
yield
# noinspection SpellCheckingInspection
def stoptrace():
"""Stops tracing in the current process and undoes all monkey-patches done by the debugger."""
global connected
if connected:
pydevd_tracing.restore_sys_set_trace_func()
sys.settrace(None)
try:
# Not available in Jython!
threading.settrace(None) # Disable tracing for all future threads.
except:
pass
from _pydev_bundle.pydev_monkey import undo_patch_thread_modules
undo_patch_thread_modules()
debugger = get_global_debugger()
if debugger:
debugger.set_trace_for_frame_and_parents(get_frame(), disable=True)
debugger.exiting()
kill_all_pydev_threads()
connected = False
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB(object):
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling process_net_command.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
def __init__(self, set_as_global=True):
if set_as_global:
set_global_debugger(self)
pydevd_tracing.replace_sys_set_trace_func()
self.reader = None
self.writer = None
self.output_checker_thread = None
self.py_db_command_thread = None
self.quitting = None
self.cmd_factory = NetCommandFactory()
self._cmd_queue = defaultdict(_queue.Queue) # Key is thread id or '*', value is Queue
self.breakpoints = {}
# mtime to be raised when breakpoints change
self.mtime = 0
self.file_to_id_to_line_breakpoint = {}
self.file_to_id_to_plugin_breakpoint = {}
# Note: breakpoints dict should not be mutated: a copy should be created
# and later it should be assigned back (to prevent concurrency issues).
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
self.ready_to_run = False
self._main_lock = TrackedLock()
self._lock_running_thread_ids = thread.allocate_lock()
self._py_db_command_thread_event = threading.Event()
if set_as_global:
CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
self._finish_debugging_session = False
self._termination_event_set = False
self.signature_factory = None
self.SetTrace = pydevd_tracing.SetTrace
self.skip_on_exceptions_thrown_in_same_context = False
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
# Suspend debugger even if breakpoint condition raises an exception.
# May be changed with CMD_PYDEVD_JSON_CONFIG.
self.skip_suspend_on_breakpoint_exception = () # By default suspend on any Exception.
self.skip_print_breakpoint_exception = () # By default print on any Exception.
# By default user can step into properties getter/setter/deleter methods
self.disable_property_trace = False
self.disable_property_getter_trace = False
self.disable_property_setter_trace = False
self.disable_property_deleter_trace = False
#this is a dict of thread ids pointing to thread ids. Whenever a command is passed to the java end that
#acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not
#find that thread alive anymore, we must remove it from this list and make the java side know that the thread
#was killed.
self._running_thread_ids = {}
self._set_breakpoints_with_id = False
# This attribute holds the file-> lines which have an @IgnoreException.
self.filename_to_lines_where_exceptions_are_ignored = {}
#working with plugins (lazily initialized)
self.plugin = None
self.has_plugin_line_breaks = False
self.has_plugin_exception_breaks = False
self.thread_analyser = None
self.asyncio_analyser = None
# matplotlib support in debugger and debug console
self.mpl_in_use = False
self.mpl_hooks_in_debug_console = False
self.mpl_modules_for_patching = {}
self._filename_to_not_in_scope = {}
self.first_breakpoint_reached = False
self.is_filter_enabled = pydevd_utils.is_filter_enabled()
self.is_filter_libraries = pydevd_utils.is_filter_libraries()
self.show_return_values = False
self.remove_return_values_flag = False
self.redirect_output = False
# this flag disables frame evaluation even if it's available
self.use_frame_eval = True
self.stop_on_start = False
# If True, pydevd will send a single notification when all threads are suspended/resumed.
self._threads_suspended_single_notification = ThreadsSuspendedSingleNotification(self)
self._local_thread_trace_func = threading.local()
# sequence id of `CMD_PROCESS_CREATED` command -> threading.Event
self.process_created_msg_received_events = dict()
# the role PyDB plays in the communication with IDE
self.communication_role = None
self.collect_return_info = collect_return_info
# If True, pydevd will stop on assertion errors in tests.
self.stop_on_failed_tests = False
def get_thread_local_trace_func(self):
try:
thread_trace_func = self._local_thread_trace_func.thread_trace_func
except AttributeError:
thread_trace_func = self.trace_dispatch
return thread_trace_func
def enable_tracing(self, thread_trace_func=None, apply_to_all_threads=False):
'''
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
'''
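        # For reference, any thread_trace_func passed here must follow CPython's
        # sys.settrace contract (an illustrative sketch, not pydevd's real dispatcher):
        #
        #     def thread_trace_func(frame, event, arg):
        #         # event is 'call', 'line', 'return' or 'exception'
        #         return thread_trace_func  # keep tracing frames called from here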
set_fallback_excepthook()
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
if IS_CPYTHON and apply_to_all_threads:
pydevd_tracing.set_trace_to_threads(self.dummy_trace_dispatch)
return
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func()
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func)
if IS_CPYTHON and apply_to_all_threads:
pydevd_tracing.set_trace_to_threads(thread_trace_func)
def disable_tracing(self):
pydevd_tracing.SetTrace(None)
def on_breakpoints_changed(self, removed=False):
'''
When breakpoints change, we have to re-evaluate all the assumptions we've made so far.
'''
if not self.ready_to_run:
# No need to do anything if we're still not running.
return
self.mtime += 1
if not removed:
# When removing breakpoints we can leave tracing as was, but if a breakpoint was added
# we have to reset the tracing for the existing functions to be re-evaluated.
self.set_tracing_for_untraced_contexts()
def set_tracing_for_untraced_contexts(self, ignore_current_thread=False):
# Enable the tracing for existing threads (because there may be frames being executed that
# are currently untraced).
ignore_thread = None
if ignore_current_thread:
ignore_thread = threading.current_thread()
ignore_thread_ids = set(
t.ident for t in threadingEnumerate()
if getattr(t, 'is_pydev_daemon_thread', False) or getattr(t, 'pydev_do_not_trace', False)
)
if IS_CPYTHON:
# Note: use sys._current_frames instead of threading.enumerate() because this way
# we also see C/C++ threads, not only the ones visible to the threading module.
tid_to_frame = sys._current_frames()
for thread_id, frame in tid_to_frame.items():
if thread_id not in ignore_thread_ids:
self.set_trace_for_frame_and_parents(frame)
else:
threads = threadingEnumerate()
try:
for t in threads:
if t.ident in ignore_thread_ids or t is ignore_thread:
continue
additional_info = set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
if frame is not None:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
finally:
frame = None
t = None
threads = None
additional_info = None
@property
def multi_threads_single_notification(self):
return self._threads_suspended_single_notification.multi_threads_single_notification
@multi_threads_single_notification.setter
def multi_threads_single_notification(self, notify):
self._threads_suspended_single_notification.multi_threads_single_notification = notify
def get_plugin_lazy_init(self):
if self.plugin is None and SUPPORT_PLUGINS:
self.plugin = PluginManager(self)
return self.plugin
def in_project_scope(self, filename):
return pydevd_utils.in_project_roots(filename)
def is_ignored_by_filters(self, filename):
return pydevd_utils.is_ignored_by_filter(filename)
def is_exception_trace_in_project_scope(self, trace):
return pydevd_utils.is_exception_trace_in_project_scope(trace)
def is_top_level_trace_in_project_scope(self, trace):
return pydevd_utils.is_top_level_trace_in_project_scope(trace)
def is_test_item_or_set_up_caller(self, frame):
return pydevd_utils.is_test_item_or_set_up_caller(frame)
def set_unit_tests_debugging_mode(self):
self.stop_on_failed_tests = True
def has_threads_alive(self):
for t in pydevd_utils.get_non_pydevd_threads():
if isinstance(t, PyDBDaemonThread):
pydev_log.error_once(
'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
if is_thread_alive(t):
if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
return True
return False
def finish_debugging_session(self):
self._finish_debugging_session = True
def initialize_network(self, sock):
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
self.writer = WriterThread(sock)
self.reader = ReaderThread(sock)
self.writer.start()
self.reader.start()
time.sleep(0.1) # give threads time to start
def connect(self, host, port):
if host:
self.communication_role = CommunicationRole.CLIENT
s = start_client(host, port)
else:
self.communication_role = CommunicationRole.SERVER
s = start_server(port)
self.initialize_network(s)
def get_internal_queue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
return self._cmd_queue[thread_id]
def post_internal_command(self, int_cmd, thread_id):
""" if thread_id is *, post to the '*' queue"""
queue = self.get_internal_queue(thread_id)
queue.put(int_cmd)
def enable_output_redirection(self, redirect_stdout, redirect_stderr):
global bufferStdOutToServer
global bufferStdErrToServer
bufferStdOutToServer = redirect_stdout
bufferStdErrToServer = redirect_stderr
self.redirect_output = redirect_stdout or redirect_stderr
if bufferStdOutToServer:
init_stdout_redirect()
if bufferStdErrToServer:
init_stderr_redirect()
def check_output_redirect(self):
global bufferStdOutToServer
global bufferStdErrToServer
if bufferStdOutToServer:
init_stdout_redirect()
if bufferStdErrToServer:
init_stderr_redirect()
def init_matplotlib_in_debug_console(self):
# import hook and patches for matplotlib support in debug console
from _pydev_bundle.pydev_import_hook import import_hook_manager
for module in dict_keys(self.mpl_modules_for_patching):
import_hook_manager.add_module_name(module, self.mpl_modules_for_patching.pop(module))
def init_matplotlib_support(self):
# prepare debugger for integration with matplotlib GUI event loop
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
# enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
# in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
# thread and can call it directly.
class _MatplotlibHelper:
_return_control_osc = False
def return_control():
# Some of the input hooks (e.g. Qt4Agg) check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
_MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
return _MatplotlibHelper._return_control_osc
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(return_control)
self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab }
def _activate_mpl_if_needed(self):
if len(self.mpl_modules_for_patching) > 0:
for module in dict_keys(self.mpl_modules_for_patching):
if module in sys.modules:
activate_function = self.mpl_modules_for_patching.pop(module)
activate_function()
self.mpl_in_use = True
def _call_mpl_hook(self):
try:
from pydev_ipython.inputhook import get_inputhook
inputhook = get_inputhook()
if inputhook:
inputhook()
except:
pass
def notify_thread_created(self, thread_id, thread, use_lock=True):
if self.writer is None:
# Protect against threads being created before the communication structure is in place
# (note that they will appear later on anyways as pydevd does reconcile live/dead threads
# when processing internal commands, albeit it may take longer and in general this should
# not be usual as it's expected that the debugger is live before other threads are created).
return
with self._lock_running_thread_ids if use_lock else NULL:
if thread_id in self._running_thread_ids:
return
additional_info = set_additional_thread_info(thread)
if additional_info.pydev_notify_kill:
# After we notify it should be killed, make sure we don't notify it's alive (in a race condition
# this could happen, as we may notify before the thread is stopped internally).
return
self._running_thread_ids[thread_id] = thread
self.writer.add_command(self.cmd_factory.make_thread_created_message(thread))
def notify_thread_not_alive(self, thread_id, use_lock=True):
""" if thread is not alive, cancel trace_dispatch processing """
if self.writer is None:
return
with self._lock_running_thread_ids if use_lock else NULL:
thread = self._running_thread_ids.pop(thread_id, None)
if thread is None:
return
was_notified = thread.additional_info.pydev_notify_kill
if not was_notified:
thread.additional_info.pydev_notify_kill = True
self.writer.add_command(self.cmd_factory.make_thread_killed_message(thread_id))
def process_internal_commands(self):
'''This function processes internal commands
'''
with self._main_lock:
self.check_output_redirect()
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
with self._lock_running_thread_ids:
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
elif is_thread_alive(t):
if not self._running_thread_ids:
# Fix multiprocessing debug with breakpoints in both main and child processes
# (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
# thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
# get new id with its process number and the debugger loses access to both threads.
# Therefore we should update thread_id for every main thread in the new process.
# Fix it for all existing threads.
for existing_thread in all_threads:
old_thread_id = get_thread_id(existing_thread)
clear_cached_thread_id(t)
thread_id = get_thread_id(t)
if thread_id != old_thread_id:
if pydevd_vars.has_additional_frames_by_id(old_thread_id):
frames_by_id = pydevd_vars.get_additional_frames_by_id(old_thread_id)
pydevd_vars.add_additional_frame_by_id(thread_id, frames_by_id)
thread_id = get_thread_id(t)
program_threads_alive[thread_id] = t
self.notify_thread_created(thread_id, t, use_lock=False)
# Compute and notify about threads which are no longer alive.
thread_ids = list(self._running_thread_ids.keys())
for thread_id in thread_ids:
if thread_id not in program_threads_alive:
program_threads_dead.append(thread_id)
for thread_id in program_threads_dead:
self.notify_thread_not_alive(thread_id, use_lock=False)
# Without self._lock_running_thread_ids
if len(program_threads_alive) == 0:
self.finish_debugging_session()
for t in all_threads:
if hasattr(t, 'do_kill_pydev_thread'):
t.do_kill_pydev_thread()
else:
# Actually process the commands now (make sure we don't have a lock for _lock_running_thread_ids
# acquired at this point as it could lead to a deadlock if some command evaluated tried to
# create a thread and wait for it -- which would try to notify about it getting that lock).
curr_thread_id = get_current_thread_id(threadingCurrentThread())
for thread_id in (curr_thread_id, '*'):
queue = self.get_internal_queue(thread_id)
# some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
cmds_to_add_back = []
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches if only debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydevd_log(2, "Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
pydevd_log(2, "processing internal command ", str(int_cmd))
int_cmd.do_it(self)
else:
pydevd_log(2, "NOT processing internal command ", str(int_cmd))
cmds_to_add_back.append(int_cmd)
except _queue.Empty: # @UndefinedVariable
# this is how we exit
for int_cmd in cmds_to_add_back:
queue.put(int_cmd)
def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints):
break_dict = {}
for breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint):
break_dict[pybreakpoint.line] = pybreakpoint
breakpoints[file] = break_dict
self.clear_skip_caches()
def clear_skip_caches(self):
global_cache_skips.clear()
global_cache_frame_skips.clear()
def add_break_on_exception(
self,
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries=False
):
try:
eb = ExceptionBreakpoint(
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries
)
except ImportError:
pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,))
return None
if eb.notify_on_unhandled_exceptions:
cp = self.break_on_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,))
self.break_on_uncaught_exceptions = cp
if eb.notify_on_handled_exceptions:
cp = self.break_on_caught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook always: %s\n" % (cp,))
self.break_on_caught_exceptions = cp
return eb
def _mark_suspend(self, thread, stop_reason):
info = set_additional_thread_info(thread)
info.suspend_type = PYTHON_SUSPEND
thread.stop_reason = stop_reason
if info.pydev_step_cmd == -1:
# If the step command is not specified, set it to step into
# to make sure it'll break as soon as possible.
info.pydev_step_cmd = CMD_STEP_INTO
# Mark as suspend as the last thing.
info.pydev_state = STATE_SUSPEND
return info
def set_suspend(self, thread, stop_reason, suspend_other_threads=False, is_pause=False):
'''
:param thread:
The thread which should be suspended.
:param stop_reason:
Reason why the thread was suspended.
:param suspend_other_threads:
Whether to force other threads to be suspended (i.e.: when hitting a breakpoint
with a suspend all threads policy).
:param is_pause:
If this is a pause to suspend all threads, any thread can be considered as the 'main'
thread paused.
'''
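# Usage sketch (illustrative only; the first form mirrors the call made in settrace further below):
#   py_db.set_suspend(t, CMD_SET_BREAK)  # suspend a single thread as if a breakpoint was hit
#   py_db.set_suspend(t, CMD_THREAD_SUSPEND, is_pause=True)  # pause; other threads may also be suspended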
self._threads_suspended_single_notification.increment_suspend_time()
if is_pause:
self._threads_suspended_single_notification.on_pause()
info = self._mark_suspend(thread, stop_reason)
if is_pause:
# Must set tracing after setting the state to suspend.
frame = info.get_topmost_frame(thread)
if frame is not None:
try:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
# If conditional breakpoint raises any exception during evaluation send the details to the client.
if stop_reason == CMD_SET_BREAK and info.conditional_breakpoint_exception is not None:
conditional_breakpoint_exception_tuple = info.conditional_breakpoint_exception
info.conditional_breakpoint_exception = None
self._send_breakpoint_condition_exception(thread, conditional_breakpoint_exception_tuple)
if not suspend_other_threads and self.multi_threads_single_notification:
# In the mode which gives a single notification when all threads are
# stopped, stop all threads whenever a set_suspend is issued.
suspend_other_threads = True
if suspend_other_threads:
# Suspend all other threads.
all_threads = pydevd_utils.get_non_pydevd_threads()
for t in all_threads:
if getattr(t, 'pydev_do_not_trace', None):
pass # skip some other threads, i.e. ipython history saving thread from debug console
else:
if t is thread:
continue
info = self._mark_suspend(t, CMD_THREAD_SUSPEND)
frame = info.get_topmost_frame(t)
# Reset the time as in this case this was not the main thread suspended.
if frame is not None:
try:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
def _send_breakpoint_condition_exception(self, thread, conditional_breakpoint_exception_tuple):
"""If conditional breakpoint raises an exception during evaluation
send exception details to java
"""
thread_id = get_thread_id(thread)
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack(self, thread, arg, curr_frame_id):
"""Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack_proceeded(self, thread):
"""Sends that some thread was resumed and is no longer showing an exception trace.
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.post_internal_command(int_cmd, thread_id)
self.process_internal_commands()
def send_process_created_message(self):
"""Sends a message that a new process has been created.
"""
cmd = self.cmd_factory.make_process_created_message()
self.writer.add_command(cmd)
def send_process_will_be_substituted(self):
"""When `PyDB` works in server mode this method sends a message that a
new process is going to be created. After that it waits for the
response from the IDE to be sure that the IDE received this message.
Waiting for the response is required because the current process might
become substituted before it actually sends the message and the IDE
will not try to connect to `PyDB` in this case.
When `PyDB` works in client mode this method does nothing because the
substituted process will try to connect to the IDE itself.
"""
if self.communication_role == CommunicationRole.SERVER:
if self._main_lock.is_acquired_by_current_thread():
# if `_main_lock` is acquired by the current thread then `event.wait()` would get stuck
# because the corresponding call to `event.set()` is made under the same `_main_lock`
pydev_log.debug("Skip sending process substitution notification\n")
return
cmd = self.cmd_factory.make_process_created_message()
# register event before putting command to the message queue
event = threading.Event()
self.process_created_msg_received_events[cmd.seq] = event
self.writer.add_command(cmd)
event.wait()
def set_next_statement(self, frame, event, func_name, next_line):
stop = False
response_msg = ""
old_line = frame.f_lineno
if event == 'line' or event == 'exception':
#If we're already in the correct context, we have to stop it now, because we can act only on
#line events -- if a return was the next statement it wouldn't work (so, we have this code
#repeated at pydevd_frame).
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if func_name == '*' or curr_func_name == func_name:
line = next_line
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
stop = True
else:
response_msg = "jump is available only within the bottom frame"
return stop, old_line, response_msg
def cancel_async_evaluation(self, thread_id, frame_id):
self._main_lock.acquire()
try:
all_threads = threadingEnumerate()
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False) and hasattr(t, 'cancel_event') and hasattr(t, 'thread_id') and \
t.thread_id == thread_id and t.frame_id == frame_id:
t.cancel_event.set()
except:
traceback.print_exc()
finally:
self._main_lock.release()
def do_wait_suspend(self, thread, frame, event, arg, send_suspend_message=True, is_unhandled_exception=False): #@UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
:param is_unhandled_exception:
If True we should use the line of the exception instead of the current line in the frame
as the paused location on the top-level frame (exception info must be passed on 'arg').
"""
self.process_internal_commands()
thread_stack_str = '' # @UnusedVariable -- this is here so that `make_get_thread_stack_message`
# can retrieve it later.
thread_id = get_current_thread_id(thread)
stop_reason = thread.stop_reason
suspend_type = thread.additional_info.trace_suspend_type
if send_suspend_message:
# Send the suspend message
message = thread.additional_info.pydev_message
thread.additional_info.trace_suspend_type = 'trace' # Reset to trace mode for next call.
frame_to_lineno = {}
if is_unhandled_exception:
# arg must be the exception info (tuple(exc_type, exc, traceback))
tb = arg[2]
while tb is not None:
frame_to_lineno[tb.tb_frame] = tb.tb_lineno
tb = tb.tb_next
cmd = self.cmd_factory.make_thread_suspend_message(thread_id, frame, stop_reason, message, suspend_type, frame_to_lineno=frame_to_lineno)
frame_to_lineno.clear()
thread_stack_str = cmd.thread_stack_str # @UnusedVariable -- `make_get_thread_stack_message` uses it later.
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable
from_this_thread = []
for frame_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
# print >> sys.stderr, 'Frame created: ', frame_id
self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(frame_id, custom_frame.name))
self.writer.add_command(self.cmd_factory.make_thread_suspend_message(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, "", suspend_type))
from_this_thread.append(frame_id)
with self._threads_suspended_single_notification.notify_thread_suspended(thread_id, stop_reason):
self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread)
def _do_wait_suspend(self, thread, frame, event, arg, suspend_type, from_this_thread):
info = thread.additional_info
if info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
# before every stop check if matplotlib modules were imported inside script code
self._activate_mpl_if_needed()
while info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
self.cancel_async_evaluation(get_current_thread_id(thread), str(id(frame)))
# process any stepping instructions
if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE:
info.pydev_step_stop = None
info.pydev_smart_step_context.smart_step_stop = None
elif info.pydev_step_cmd == CMD_STEP_OVER:
info.pydev_step_stop = frame
info.pydev_smart_step_context.smart_step_stop = None
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
self.set_trace_for_frame_and_parents(frame)
info.pydev_step_stop = None
info.pydev_smart_step_context.smart_step_stop = frame
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT:
self.set_trace_for_frame_and_parents(frame)
stop = False
response_msg = ""
old_line = frame.f_lineno
if not IS_PYCHARM:
stop, _, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
if not stop:
# Set next did not work...
info.pydev_step_cmd = -1
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_THREAD_SUSPEND
# return to the suspend state and wait for other command (without sending any
# additional notification to the client).
self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread)
return
else:
try:
stop, old_line, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
except ValueError as e:
response_msg = "%s" % e
finally:
if GOTO_HAS_RESPONSE:
seq = info.pydev_message
cmd = self.cmd_factory.make_set_next_stmnt_status_message(seq, stop, response_msg)
self.writer.add_command(cmd)
info.pydev_message = ''
if stop:
cmd = self.cmd_factory.make_thread_run_message(get_current_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_SET_NEXT_STATEMENT
self.do_wait_suspend(thread, frame, event, arg)
return
else:
info.pydev_step_cmd = -1
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_THREAD_SUSPEND
# return to the suspend state and wait for other command
self.do_wait_suspend(thread, frame, event, arg, send_suspend_message=False)
return
elif info.pydev_step_cmd == CMD_STEP_RETURN:
back_frame = frame.f_back
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
else:
# No back frame?!? -- this happens in jython when we have some frame created from an awt event
# (the previous frame would be the awt event, but it isn't part of 'jython', only 'java')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
del frame
cmd = self.cmd_factory.make_thread_run_message(get_current_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print >> sys.stderr, 'Removing created frame: ', frame_id
self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id))
def stop_on_unhandled_exception(self, thread, frame, frames_byid, arg):
pydev_log.debug("We are stopping in post-mortem\n")
thread_id = get_thread_id(thread)
pydevd_vars.add_additional_frame_by_id(thread_id, frames_byid)
exctype, value, tb = arg
tb = pydevd_utils.get_top_level_trace_in_project_scope(tb)
if sys.excepthook != dummy_excepthook:
original_excepthook(exctype, value, tb)
disable_excepthook() # Avoid printing the exception for the second time.
try:
try:
add_exception_to_frame(frame, arg)
self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK)
self.do_wait_suspend(thread, frame, 'exception', arg, is_unhandled_exception=True)
except KeyboardInterrupt as e:
raise e
except:
pydev_log.error("We've got an error while stopping in post-mortem: %s\n" % (arg[0],))
finally:
remove_exception_from_frame(frame)
pydevd_vars.remove_additional_frame_by_id(thread_id)
frame = None
def set_trace_for_frame_and_parents(self, frame, **kwargs):
disable = kwargs.pop('disable', False)
assert not kwargs
while frame is not None:
try:
# Make fast path faster!
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
# Don't change the tracing on debugger-related files
file_type = get_file_type(abs_path_real_path_and_base[-1])
if file_type is None:
if disable:
if frame.f_trace is not None and frame.f_trace is not NO_FTRACE:
frame.f_trace = NO_FTRACE
elif frame.f_trace is not self.trace_dispatch:
frame.f_trace = self.trace_dispatch
frame = frame.f_back
del frame
def _create_pydb_command_thread(self):
curr_pydb_command_thread = self.py_db_command_thread
if curr_pydb_command_thread is not None:
curr_pydb_command_thread.do_kill_pydev_thread()
new_pydb_command_thread = self.py_db_command_thread = PyDBCommandThread(self)
new_pydb_command_thread.start()
def _create_check_output_thread(self):
curr_output_checker_thread = self.output_checker_thread
if curr_output_checker_thread is not None:
curr_output_checker_thread.do_kill_pydev_thread()
output_checker_thread = self.output_checker_thread = CheckOutputThread(self)
output_checker_thread.start()
def start_auxiliary_daemon_threads(self):
self._create_pydb_command_thread()
self._create_check_output_thread()
def prepare_to_run(self, enable_tracing_from_start=True):
''' Shared code to prepare debugging by installing traces and registering threads '''
self._create_pydb_command_thread()
if self.redirect_output or self.signature_factory is not None or self.thread_analyser is not None:
# we need all data to be sent to IDE even after program finishes
self._create_check_output_thread()
# turn off frame evaluation for concurrency visualization
self.frame_eval_func = None
self.patch_threads()
if enable_tracing_from_start:
pydevd_tracing.SetTrace(self.trace_dispatch)
if show_tracing_warning or show_frame_eval_warning:
cmd = self.cmd_factory.make_show_warning_message("cython")
self.writer.add_command(cmd)
def patch_threads(self):
try:
# not available in jython!
threading.settrace(self.trace_dispatch) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import patch_thread_modules
patch_thread_modules()
def run(self, file, globals=None, locals=None, is_module=False, set_trace=True):
module_name = None
entry_point_fn = ''
if is_module:
# When launching with `python -m <module>`, python automatically adds
# an empty path to the PYTHONPATH which resolves files in the current
# directory, so, depending how pydevd itself is launched, we may need
# to manually add such an entry to properly resolve modules in the
# current directory
if '' not in sys.path:
sys.path.insert(0, '')
file, _, entry_point_fn = file.partition(':')
module_name = file
filename = get_fullname(file)
if filename is None:
mod_dir = get_package_dir(module_name)
if mod_dir is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
filename = get_fullname("%s.__main__" % module_name)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
else:
file = filename
mod_dir = os.path.dirname(filename)
main_py = os.path.join(mod_dir, '__main__.py')
main_pyc = os.path.join(mod_dir, '__main__.pyc')
if filename.endswith('__init__.pyc'):
if os.path.exists(main_pyc):
filename = main_pyc
elif os.path.exists(main_py):
filename = main_py
elif filename.endswith('__init__.py'):
if os.path.exists(main_pyc) and not os.path.exists(main_py):
filename = main_pyc
elif os.path.exists(main_py):
filename = main_py
sys.argv[0] = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
m = None
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
# I think this is an ugly hack, but it works (or seems to) for the bug that says that sys.path should be the same in
# debug and run.
if sys.path[0] != '' and m is not None and m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
if not is_module:
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
# The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(rPath(file))[0])
if set_trace:
while not self.ready_to_run:
time.sleep(0.1) # busy wait until we receive run command
if self.break_on_caught_exceptions or self.has_plugin_line_breaks or self.has_plugin_exception_breaks \
or self.signature_factory:
# disable frame evaluation if there are exception breakpoints with 'On raise' activation policy
# or if there are plugin exception breakpoints or if collecting run-time types is enabled
self.frame_eval_func = None
# call prepare_to_run when we already have all information about breakpoints
self.prepare_to_run()
t = threadingCurrentThread()
thread_id = get_current_thread_id(t)
if self.thread_analyser is not None:
wrap_threads()
self.thread_analyser.set_start_time(cur_time())
send_message("threading_event", 0, t.getName(), thread_id, "thread", "start", file, 1, None, parent=get_thread_id(t))
if self.asyncio_analyser is not None:
if IS_PY36_OR_GREATER:
wrap_asyncio()
# we don't have main thread in asyncio graph, so we should add a fake event
send_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None)
try:
if INTERACTIVE_MODE_AVAILABLE:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
traceback.print_exc()
if hasattr(sys, 'exc_clear'):
# we should clean exception information in Python 2, before user's code execution
sys.exc_clear()
# Notify that the main thread is created.
self.notify_thread_created(thread_id, t)
if self.stop_on_start:
info = set_additional_thread_info(t)
t.additional_info.pydev_step_cmd = CMD_STEP_INTO_MY_CODE
# Note: important: set the tracing right before calling _exec.
if set_trace:
self.enable_tracing()
return self._exec(is_module, entry_point_fn, module_name, file, globals, locals)
def _exec(self, is_module, entry_point_fn, module_name, file, globals, locals):
'''
This function should have frames tracked by unhandled exceptions (the `_exec` name is important).
'''
if not is_module:
pydev_imports.execfile(file, globals, locals) # execute the script
else:
# treat ':' as a separator between module and entry point function
# if there is no entry point we run we same as with -m switch. Otherwise we perform
# an import and execute the entry point
if entry_point_fn:
mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)
func = getattr(mod, entry_point_fn)
func()
else:
# Run with the -m switch
import runpy
if hasattr(runpy, '_run_module_as_main'):
# Newer versions of Python actually use this when the -m switch is used.
if sys.version_info[:2] <= (2, 6):
runpy._run_module_as_main(module_name, set_argv0=False)
else:
runpy._run_module_as_main(module_name, alter_argv=False)
else:
runpy.run_module(module_name)
return globals
def exiting(self):
# noinspection PyBroadException
try:
sys.stdout.flush()
except:
pass
# noinspection PyBroadException
try:
sys.stderr.flush()
except:
pass
self.check_output_redirect()
cmd = self.cmd_factory.make_exit_message()
self.writer.add_command(cmd)
def wait_for_commands(self, globals):
self._activate_mpl_if_needed()
thread = threading.currentThread()
from _pydevd_bundle import pydevd_frame_utils
frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
os.path.abspath(os.path.dirname(__file__))), globals, globals)
thread_id = get_current_thread_id(thread)
pydevd_vars.add_additional_frame_by_id(thread_id, {id(frame): frame})
cmd = self.cmd_factory.make_show_console_message(thread_id, frame)
self.writer.add_command(cmd)
while True:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
trace_dispatch = _trace_dispatch
frame_eval_func = frame_eval_func
dummy_trace_dispatch = dummy_trace_dispatch
# noinspection SpellCheckingInspection
@staticmethod
def stoptrace():
"""A proxy method for calling :func:`stoptrace` from the modules where direct import
is impossible because, for example, a circular dependency."""
PyDBDaemonThread.created_pydb_daemon_threads = {}
stoptrace()
def set_debug(setup):
setup['DEBUG_RECORD_SOCKET_READS'] = True
setup['DEBUG_TRACE_BREAKPOINTS'] = 1
setup['DEBUG_TRACE_LEVEL'] = 3
def enable_qt_support(qt_support_mode):
from _pydev_bundle import pydev_monkey_qt
pydev_monkey_qt.patch_qt(qt_support_mode)
def dump_threads(stream=None):
'''
Helper to dump thread info (default is printing to stderr).
'''
pydevd_utils.dump_threads(stream)
def usage(do_exit=True, exit_code=0):
sys.stdout.write('Usage:\n')
sys.stdout.write('\tpydevd.py --port N [(--client hostname) | --server] --file executable [file_options]\n')
if do_exit:
sys.exit(exit_code)
class _CustomWriter(object):
def __init__(self, out_ctx, wrap_stream, wrap_buffer, on_write=None):
'''
:param out_ctx:
1=stdout and 2=stderr
:param wrap_stream:
Either sys.stdout or sys.stderr.
:param bool wrap_buffer:
If True the buffer attribute (which wraps writing bytes) should be
wrapped.
:param callable(str) on_write:
May be a custom callable to be called whenever something is written.
If not passed the default implementation will create an io message
and send it through the debugger.
'''
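# Sketch (illustrative only): init_stdout_redirect(on_write=some_callable) further below builds a
# _CustomWriter whose writes are handed to `some_callable` instead of being sent as io messages.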
self.encoding = getattr(wrap_stream, 'encoding', os.environ.get('PYTHONIOENCODING', 'utf-8'))
self._out_ctx = out_ctx
if wrap_buffer:
self.buffer = _CustomWriter(out_ctx, wrap_stream, wrap_buffer=False, on_write=on_write)
self._on_write = on_write
def flush(self):
pass # no-op here
def write(self, s):
if self._on_write is not None:
self._on_write(s)
return
if s:
if IS_PY2:
# Need s in bytes
if isinstance(s, unicode):
# Note: python 2.6 does not accept the "errors" keyword.
s = s.encode('utf-8', 'replace')
else:
# Need s in str
if isinstance(s, bytes):
s = s.decode(self.encoding, errors='replace')
py_db = get_global_debugger()
if py_db is not None:
# Note that the actual message contents will be a xml with utf-8, although
# the entry is str on py3 and bytes on py2.
cmd = py_db.cmd_factory.make_io_message(s, self._out_ctx)
py_db.writer.add_command(cmd)
def init_stdout_redirect(on_write=None):
if not hasattr(sys, '_pydevd_out_buffer_'):
wrap_buffer = True if not IS_PY2 else False
original = sys.stdout
sys._pydevd_out_buffer_ = _CustomWriter(1, original, wrap_buffer, on_write)
sys.stdout_original = original
sys.stdout = pydevd_io.IORedirector(original, sys._pydevd_out_buffer_, wrap_buffer) #@UndefinedVariable
def init_stderr_redirect(on_write=None):
if not hasattr(sys, '_pydevd_err_buffer_'):
wrap_buffer = True if not IS_PY2 else False
original = sys.stderr
sys._pydevd_err_buffer_ = _CustomWriter(2, original, wrap_buffer, on_write)
sys.stderr_original = original
sys.stderr = pydevd_io.IORedirector(original, sys._pydevd_err_buffer_, wrap_buffer) #@UndefinedVariable
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
host=None,
stdoutToServer=False,
stderrToServer=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
stop_at_frame=None,
):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
@param overwrite_prev_trace: deprecated
@param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
@param stop_at_frame: if passed it'll stop at the given frame, otherwise it'll stop in the function which
called this method.
'''
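# Typical usage sketch (host/port values are illustrative; assumes an IDE debug server is already
# listening on that port):
#   import pydevd
#   pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True, suspend=True)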
_set_trace_lock.acquire()
try:
_locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
)
finally:
_set_trace_lock.release()
_set_trace_lock = thread.allocate_lock()
def _locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
):
if patch_multiprocessing:
try:
from _pydev_bundle import pydev_monkey
except:
pass
else:
pydev_monkey.patch_new_process_functions()
global connected
global bufferStdOutToServer
global bufferStdErrToServer
# Reset created PyDB daemon threads after fork - parent threads don't exist in a child process.
PyDBDaemonThread.created_pydb_daemon_threads = {}
if not connected:
pydevd_vm_type.setup_type()
if SetupHolder.setup is None:
setup = {
'client': host, # dispatch expects client to be set to the host address when server is False
'server': False,
'port': int(port),
'multiprocess': patch_multiprocessing,
}
SetupHolder.setup = setup
debugger = PyDB()
pydev_log.debug("pydev debugger: process %d is connecting\n" % os.getpid())
debugger.connect(host, port) # Note: connect can raise error.
# Mark connected only if it actually succeeded.
connected = True
bufferStdOutToServer = stdoutToServer
bufferStdErrToServer = stderrToServer
if bufferStdOutToServer:
init_stdout_redirect()
if bufferStdErrToServer:
init_stderr_redirect()
patch_stdin(debugger)
t = threadingCurrentThread()
additional_info = set_additional_thread_info(t)
while not debugger.ready_to_run:
time.sleep(0.1) # busy wait until we receive run command
# Set the tracing only
debugger.set_trace_for_frame_and_parents(get_frame().f_back)
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
debugger.set_trace_for_frame_and_parents(custom_frame.frame)
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
debugger.start_auxiliary_daemon_threads()
debugger.enable_tracing(apply_to_all_threads=True)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
# As this is the first connection, also set tracing for any untraced threads
debugger.set_tracing_for_untraced_contexts(ignore_current_thread=True)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
else:
# ok, we're already in debug mode, with all set, so, let's just set the break
debugger = get_global_debugger()
debugger.set_trace_for_frame_and_parents(get_frame().f_back)
t = threadingCurrentThread()
additional_info = set_additional_thread_info(t)
debugger.enable_tracing()
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
# Suspend as the last thing after all tracing is in place.
if suspend:
if stop_at_frame is not None:
# If the step was set we have to go to run state and
# set the proper frame for it to stop.
additional_info.pydev_state = STATE_RUN
additional_info.pydev_step_cmd = CMD_STEP_OVER
additional_info.pydev_step_stop = stop_at_frame
additional_info.suspend_type = PYTHON_SUSPEND
else:
# Ask to break as soon as possible.
debugger.set_suspend(t, CMD_SET_BREAK)
class Dispatcher(object):
def __init__(self):
self.port = None
def connect(self, host, port):
self.host = host
self.port = port
self.client = start_client(self.host, self.port)
self.reader = DispatchReader(self)
self.reader.pydev_do_not_trace = False # We run reader in the same thread so we don't want to lose tracing.
self.reader.run()
def close(self):
try:
self.reader.do_kill_pydev_thread()
except :
pass
class DispatchReader(ReaderThread):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
ReaderThread.__init__(self, self.dispatcher.client)
@overrides(ReaderThread._on_run)
def _on_run(self):
dummy_thread = threading.currentThread()
dummy_thread.is_pydev_daemon_thread = False
return ReaderThread._on_run(self)
def handle_except(self):
ReaderThread.handle_except(self)
def process_command(self, cmd_id, seq, text):
if cmd_id == 99:
self.dispatcher.port = int(text)
self.killReceived = True
def _should_use_existing_connection(setup):
'''
The new connection dispatch approach is used by PyDev when the `multiprocess` option is set,
while the existing connection approach is used by PyCharm when the `multiproc` option is set.
'''
return setup.get('multiproc', False)
def dispatch():
setup = SetupHolder.setup
host = setup['client']
port = setup['port']
if _should_use_existing_connection(setup):
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
port = dispatcher.port
finally:
dispatcher.close()
return host, port
def settrace_forked():
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
GlobalDebuggerHolder.global_dbg = None
from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info
host, port = dispatch()
import pydevd_tracing
pydevd_tracing.restore_sys_set_trace_func()
if port is not None:
global connected
connected = False
global forked
forked = True
custom_frames_container_init()
if clear_thread_local_info is not None:
clear_thread_local_info()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
)
#=======================================================================================================================
# SetupHolder
#=======================================================================================================================
class SetupHolder:
setup = None
def apply_debugger_options(setup_options):
"""
:type setup_options: dict[str, bool]
"""
default_options = {'save-signatures': False, 'qt-support': ''}
default_options.update(setup_options)
setup_options = default_options
debugger = GetGlobalDebugger()
if setup_options['save-signatures']:
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON:
sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
else:
# Only import it if we're going to use it!
from _pydevd_bundle.pydevd_signature import SignatureFactory
debugger.signature_factory = SignatureFactory()
if setup_options['qt-support']:
enable_qt_support(setup_options['qt-support'])
def patch_stdin(debugger):
from _pydev_bundle.pydev_stdin import DebugConsoleStdIn
orig_stdin = sys.stdin
sys.stdin = DebugConsoleStdIn(debugger, orig_stdin)
def handle_keyboard_interrupt():
debugger = get_global_debugger()
if not debugger:
return
debugger.disable_tracing()
_, value, tb = sys.exc_info()
while tb:
filename = tb.tb_frame.f_code.co_filename
if debugger.in_project_scope(filename) and '_pydevd' not in filename:
break
tb = tb.tb_next
if tb:
limit = 1
tb_next = tb.tb_next
# When stopping the suspended debugger, traceback can contain two stack traces with the same frame.
if tb_next and tb_next.tb_frame is tb.tb_frame:
tb_next = None
while tb_next:
filename = tb_next.tb_frame.f_code.co_filename
if get_file_type(os.path.basename(filename)) or '_pydevd' in filename:
break
limit += 1
if tb_next.tb_next and tb_next.tb_next.tb_frame is not tb_next.tb_frame:
tb_next = tb_next.tb_next
else:
break
try:
value = value.with_traceback(tb)
except AttributeError:
value.__traceback__ = tb
value.__cause__ = None
traceback.print_exception(type(value), value, tb, limit=limit)
disable_excepthook()
# Dispatch on_debugger_modules_loaded here, after all primary debugger modules are loaded
from _pydevd_bundle.pydevd_extension_api import DebuggerEventHandler
from _pydevd_bundle import pydevd_extension_utils
for handler in pydevd_extension_utils.extensions_of_type(DebuggerEventHandler):
handler.on_debugger_modules_loaded(debugger_version=__version__)
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# parse the command line. --file is our last argument that is required
try:
from _pydevd_bundle.pydevd_command_line_handling import process_command_line
setup = process_command_line(sys.argv)
SetupHolder.setup = setup
except ValueError:
traceback.print_exc()
usage(exit_code=1)
# noinspection PyUnboundLocalVariable
if setup['help']:
usage()
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
fix_getpass.fix_getpass()
pydev_log.debug("Executing file %s" % setup['file'])
pydev_log.debug("arguments: %s"% str(sys.argv))
pydevd_vm_type.setup_type(setup.get('vm_type', None))
if SHOW_DEBUG_INFO_ENV:
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
debugger = PyDB()
try:
from _pydev_bundle import pydev_monkey
except:
pass #Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
pydev_log.debug("Started in multiproc mode\n")
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n" % port)
pydev_log.debug("pydev debugger: process %d is connecting\n" % os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
else:
pydev_log.error("pydev debugger: couldn't get port for new debug process\n")
finally:
dispatcher.close()
else:
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
traceback.print_exc()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
# that if the user already had a stackless.set_schedule_callback installed, he'd lose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
from _pydevd_bundle import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
# It's ok not having stackless there...
try:
sys.exc_clear() # the exception information should be cleaned in Python 2
except:
pass
is_module = setup['module']
patch_stdin(debugger)
if fix_app_engine_debug:
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
if setup['save-threading']:
debugger.thread_analyser = ThreadingLogger()
if setup['save-asyncio']:
if IS_PY34_OR_GREATER:
debugger.asyncio_analyser = AsyncioLogger()
apply_debugger_options(setup)
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
traceback.print_exc()
sys.exit(1)
global connected
connected = True # Mark that we're connected when started from inside ide.
try:
globals = debugger.run(setup['file'], None, None, is_module)
except KeyboardInterrupt as e:
handle_keyboard_interrupt()
raise
if setup['cmd-line']:
debugger.wait_for_commands(globals)
if __name__ == '__main__':
main()
| apache-2.0 |
kazemakase/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regression model with 299 boosts (300 decision trees) is compared with a single
decision tree regressor. As the number of boosts increases, the regressor can fit
more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
tkuipers/mycli | mycli/packages/tabulate.py | 28 | 38075 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from decimal import Decimal
from platform import python_version_tuple
from wcwidth import wcswidth
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.4"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
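# For example (expected outputs for a column width of 6):
#   _pipe_segment_with_colons("right", 6)  -> "-----:"
#   _pipe_segment_with_colons("center", 6) -> ":----:"
#   _pipe_segment_with_colons("left", 6)   -> ":-----"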
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(br"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is _int_type or type(string) is _long_type or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
if isinstance(string, (bool, Decimal,)):
return _text_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return ' ' * lwidth + s
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return s + ' ' * rwidth
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
lwidth = xwidth // 2
rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2
return ' ' * lwidth + s + ' ' * rwidth
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return wcswidth(_strip_invisible(s))
else:
return wcswidth(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
    note that reStructuredText also accepts "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1f:s:",
["help", "header", "format", "separator"])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
tablefmt = "simple"
sep = r"\s+"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep)
else:
with open(f) as fobj:
                _pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep)
def _pprint_file(fobject, headers, tablefmt, sep):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows]
print(tabulate(table, headers, tablefmt))
if __name__ == "__main__":
_main()
| bsd-3-clause |
7even7/DAT210x | Module6/assignment6.py | 8 | 2431 | import pandas as pd
import time
# Grab the DLA HAR dataset from:
# http://groupware.les.inf.puc-rio.br/har
# http://groupware.les.inf.puc-rio.br/static/har/dataset-har-PUC-Rio-ugulino.zip
#
# TODO: Load up the dataset into dataframe 'X'
#
# .. your code here ..
#
# TODO: Encode the gender column, 0 as male, 1 as female
#
# .. your code here ..
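# Illustrative sketch only (not the graded answer): one possible encoding,
# assuming the raw labels in the HAR csv are 'Man'/'Woman' -- adjust the map
# to whatever values the column actually contains:
#
#   X.gender = X.gender.map({'Man': 0, 'Woman': 1})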
#
# TODO: Clean up any column with commas in it
# so that they're properly represented as decimals instead
#
# .. your code here ..
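# Illustrative sketch only: the idea is to swap the decimal comma for a dot and
# re-cast the column; `some_column` below is a hypothetical name, since only
# some object columns in this dataset are numeric-with-commas:
#
#   X.some_column = X.some_column.str.replace(',', '.').astype(float)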
#
# INFO: Check data types
print X.dtypes
#
# TODO: Convert any column that needs to be converted into numeric
# use errors='raise'. This will alert you if something ends up being
# problematic
#
# .. your code here ..
#
# INFO: If you find any problematic records, drop them before calling the
# to_numeric methods above...
#
# TODO: Encode your 'y' value as a dummies version of your dataset's "class" column
#
# .. your code here ..
#
# TODO: Get rid of the user and class columns
#
# .. your code here ..
print X.describe()
#
# INFO: An easy way to show which rows have nans in them
print X[pd.isnull(X).any(axis=1)]
#
# TODO: Create an RForest classifier 'model' and set n_estimators=30,
# the max_depth to 10, and oob_score=True, and random_state=0
#
# .. your code here ..
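# Illustrative sketch only, using exactly the parameters listed above (assumes
# RandomForestClassifier is imported from sklearn.ensemble):
#
#   from sklearn.ensemble import RandomForestClassifier
#   model = RandomForestClassifier(n_estimators=30, max_depth=10,
#                                  oob_score=True, random_state=0)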
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7
# Use variable names: X_train, X_test, y_train, y_test
#
# .. your code here ..
print "Fitting..."
s = time.time()
#
# TODO: train your model on your training set
#
# .. your code here ..
print "Fitting completed in: ", time.time() - s
#
# INFO: Display the OOB Score of your data
score = model.oob_score_
print "OOB Score: ", round(score*100, 3)
print "Scoring..."
s = time.time()
#
# TODO: score your model on your test set
#
# .. your code here ..
print "Score: ", round(score*100, 3)
print "Scoring completed in: ", time.time() - s
#
# TODO: Answer the lab questions, then come back to experiment more
#
# TODO: Try playing around with the gender column
# Encode it as Male:1, Female:0
# Try encoding it to pandas dummies
# Also try dropping it. See how it affects the score
# This will be a key on how features affect your overall scoring
# and why it's important to choose good ones.
#
# TODO: After that, try messing with 'y'. Right now its encoded with
# dummies try other encoding methods to experiment with the effect.
| mit |
MattNolanLab/ei-attractor | grid_cell_model/simulations/007_noise/figures/paper/i_place_cells/config.py | 1 | 2423 | '''Network test configuration file.'''
from __future__ import absolute_import, print_function
import os.path
from configobj import ConfigObj
import matplotlib.ticker as ti
scale_factor = 1.
tick_width = 1. * scale_factor
tick_len = 6. * scale_factor
DATA_ROOT = ['simulation_data', 'i_place_cells']
def get_config():
'''Return the configuration object.'''
_default_config = ConfigObj()
_default_config.merge({
'grids_data_root': os.path.join(*(DATA_ROOT + ['grids_max_rate_100_field_std_80'])),
'bump_data_root': None,
'vel_data_root': None,
'const_pos_data_root': None,
'singleDataRoot': None,
'connection_data_root': None,
'scale_factor': scale_factor,
'output_dir' : 'panels_weight_sparsity/',
'noise_sigmas': [150],
'even_shape': None,
# Sections
'mpl': {
'font.size': 11,
'pdf.fonttype': 42,
'mathtext.default': 'regular',
'font.sans-serif': ['Helvetica', 'Avant Garde',
'Computer Modern Sans serif'],
'xtick.major.size' : tick_len,
'xtick.major.width' : tick_width,
'xtick.minor.size' : tick_len / 2.,
'xtick.minor.width' : tick_width,
'xtick.direction' : 'out',
'ytick.major.size' : tick_len,
'ytick.major.width' : tick_width,
'ytick.minor.size' : tick_len / 2.,
'ytick.minor.width' : tick_width,
'ytick.direction' : 'out',
},
'IPCGridSweepsPlotter': {
'scale_factor': 1.,
'cbar': [1, 1, 1],
'cbar_kw': dict(
label = "Gridness score",
location = 'right',
shrink = 0.8,
pad = .05,
ticks = ti.MultipleLocator(0.2),
rasterized = True
),
'xlabel': 'Weight (nS)',
'ylabel': '# PCs connected',
'xticks': [True]*3,
'yticks': [True, False, False],
'ann': [None, None, None],
'bbox': (.15, .17, .9, .9),
'normalize_ticks': [False, False],
'vmin': None,
'vmax': None,
},
})
##########################################################################
return _default_config
| gpl-3.0 |
BiaDarkia/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| bsd-3-clause |
luca-s/alphalens | alphalens/tears.py | 1 | 26976 | #
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import warnings
from . import plotting
from . import performance as perf
from . import utils
class GridFigure(object):
"""
    Helper that lays tear-sheet plots out on a grid, handing out full-width
    rows or individual cells on demand.
"""
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
self.fig = plt.figure(figsize=(14, rows * 7))
self.gs = gridspec.GridSpec(rows, cols, wspace=0.4, hspace=0.3)
self.curr_row = 0
self.curr_col = 0
def next_row(self):
if self.curr_col != 0:
self.curr_row += 1
self.curr_col = 0
subplt = plt.subplot(self.gs[self.curr_row, :])
self.curr_row += 1
return subplt
def next_cell(self):
if self.curr_col >= self.cols:
self.curr_row += 1
self.curr_col = 0
subplt = plt.subplot(self.gs[self.curr_row, self.curr_col])
self.curr_col += 1
return subplt
def close(self):
plt.close(self.fig)
self.fig = None
self.gs = None
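# Illustrative usage sketch (not part of the original module): a GridFigure
# hands out matplotlib axes either one full-width row at a time or one cell at
# a time, which is how the tear-sheet functions below consume it, e.g.
#
#   gf = GridFigure(rows=2, cols=2)
#   ax_wide = gf.next_row()                        # spans both columns
#   ax_left, ax_right = gf.next_cell(), gf.next_cell()
#   # ... draw on the axes, call plt.show(), then release the figure:
#   gf.close()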
@plotting.customize
def create_summary_tear_sheet(factor_data,
long_short=True,
group_neutral=False):
"""
Creates a small summary tear sheet with returns, information, and turnover
analysis.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio? if so, then
mean quantile returns will be demeaned across the factor universe.
group_neutral : bool
Should this computation happen on a group neutral portfolio? if so,
returns demeaning will occur on the group level.
"""
# Returns Analysis
mean_quant_ret, std_quantile = \
perf.mean_return_by_quantile(factor_data,
by_group=False,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret = \
mean_quant_ret.apply(utils.rate_of_return, axis=0,
base_period=mean_quant_ret.columns[0])
mean_quant_ret_bydate, std_quant_daily = \
perf.mean_return_by_quantile(factor_data,
by_date=True,
by_group=False,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret_bydate = mean_quant_ret_bydate.apply(
utils.rate_of_return,
axis=0,
base_period=mean_quant_ret_bydate.columns[0]
)
compstd_quant_daily = std_quant_daily.apply(
utils.std_conversion, axis=0,
base_period=std_quant_daily.columns[0]
)
alpha_beta = perf.factor_alpha_beta(factor_data,
demeaned=long_short,
group_adjust=group_neutral)
mean_ret_spread_quant, std_spread_quant = perf.compute_mean_returns_spread(
mean_quant_rateret_bydate,
factor_data['factor_quantile'].max(),
factor_data['factor_quantile'].min(),
std_err=compstd_quant_daily)
periods = utils.get_forward_returns_columns(factor_data.columns)
fr_cols = len(periods)
vertical_sections = 2 + fr_cols * 3
gf = GridFigure(rows=vertical_sections, cols=1)
plotting.plot_quantile_statistics_table(factor_data)
plotting.plot_returns_table(alpha_beta,
mean_quant_rateret,
mean_ret_spread_quant)
plotting.plot_quantile_returns_bar(mean_quant_rateret,
by_group=False,
ylim_percentiles=None,
ax=gf.next_row())
# Information Analysis
ic = perf.factor_information_coefficient(factor_data)
plotting.plot_information_table(ic)
# Turnover Analysis
quantile_factor = factor_data['factor_quantile']
quantile_turnover = \
{p: pd.concat([perf.quantile_turnover(quantile_factor, q, p)
for q in range(1, int(quantile_factor.max()) + 1)],
axis=1)
for p in periods}
autocorrelation = pd.concat(
[perf.factor_rank_autocorrelation(factor_data, period) for period in
periods], axis=1)
plotting.plot_turnover_table(autocorrelation, quantile_turnover)
plt.show()
gf.close()
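# Illustrative usage sketch (hypothetical variable names `my_factor`/`prices`):
# factor_data is normally built with utils.get_clean_factor_and_forward_returns
# before calling this function, e.g.
#
#   factor_data = utils.get_clean_factor_and_forward_returns(my_factor, prices)
#   create_summary_tear_sheet(factor_data, long_short=True, group_neutral=False)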
@plotting.customize
def create_returns_tear_sheet(factor_data,
long_short=True,
group_neutral=False,
by_group=False):
"""
Creates a tear sheet for returns analysis of a factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to,
and (optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio? if so, then
mean quantile returns will be demeaned across the factor universe.
Additionally factor values will be demeaned across the factor universe
when factor weighting the portfolio for cumulative returns plots
group_neutral : bool
Should this computation happen on a group neutral portfolio? if so,
returns demeaning will occur on the group level.
Additionally each group will weight the same in cumulative returns
plots
by_group : bool
If True, display graphs separately for each group.
"""
factor_returns = perf.factor_returns(factor_data,
long_short,
group_neutral)
mean_quant_ret, std_quantile = \
perf.mean_return_by_quantile(factor_data,
by_group=False,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret = \
mean_quant_ret.apply(utils.rate_of_return, axis=0,
base_period=mean_quant_ret.columns[0])
mean_quant_ret_bydate, std_quant_daily = \
perf.mean_return_by_quantile(factor_data,
by_date=True,
by_group=False,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret_bydate = mean_quant_ret_bydate.apply(
utils.rate_of_return, axis=0,
base_period=mean_quant_ret_bydate.columns[0]
)
compstd_quant_daily = \
std_quant_daily.apply(utils.std_conversion, axis=0,
base_period=std_quant_daily.columns[0])
alpha_beta = perf.factor_alpha_beta(factor_data,
factor_returns,
long_short,
group_neutral)
mean_ret_spread_quant, std_spread_quant = \
perf.compute_mean_returns_spread(mean_quant_rateret_bydate,
factor_data['factor_quantile'].max(),
factor_data['factor_quantile'].min(),
std_err=compstd_quant_daily)
fr_cols = len(factor_returns.columns)
vertical_sections = 2 + fr_cols * 3
gf = GridFigure(rows=vertical_sections, cols=1)
plotting.plot_returns_table(alpha_beta,
mean_quant_rateret,
mean_ret_spread_quant)
plotting.plot_quantile_returns_bar(mean_quant_rateret,
by_group=False,
ylim_percentiles=None,
ax=gf.next_row())
plotting.plot_quantile_returns_violin(mean_quant_rateret_bydate,
ylim_percentiles=(1, 99),
ax=gf.next_row())
trading_calendar = factor_data.index.levels[0].freq
if trading_calendar is None:
trading_calendar = pd.tseries.offsets.BDay()
warnings.warn(
"'freq' not set in factor_data index: assuming business day",
UserWarning
)
for p in factor_returns:
title = ('Factor Weighted '
+ ('Group Neutral ' if group_neutral else '')
+ ('Long/Short ' if long_short else '')
+ "Portfolio Cumulative Return ({} Period)".format(p))
plotting.plot_cumulative_returns(
factor_returns[p],
period=p,
freq=trading_calendar,
title=title,
ax=gf.next_row()
)
plotting.plot_cumulative_returns_by_quantile(
mean_quant_ret_bydate[p],
period=p,
freq=trading_calendar,
ax=gf.next_row()
)
ax_mean_quantile_returns_spread_ts = [gf.next_row()
for x in range(fr_cols)]
plotting.plot_mean_quantile_returns_spread_time_series(
mean_ret_spread_quant,
std_err=std_spread_quant,
bandwidth=0.5,
ax=ax_mean_quantile_returns_spread_ts
)
plt.show()
gf.close()
if by_group:
mean_return_quantile_group, mean_return_quantile_group_std_err = \
perf.mean_return_by_quantile(factor_data,
by_date=False,
by_group=True,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret_group = mean_return_quantile_group.apply(
utils.rate_of_return, axis=0,
base_period=mean_return_quantile_group.columns[0]
)
num_groups = len(mean_quant_rateret_group.index
.get_level_values('group').unique())
vertical_sections = 1 + (((num_groups - 1) // 2) + 1)
gf = GridFigure(rows=vertical_sections, cols=2)
ax_quantile_returns_bar_by_group = [gf.next_cell()
for _ in range(num_groups)]
plotting.plot_quantile_returns_bar(mean_quant_rateret_group,
by_group=True,
ylim_percentiles=(5, 95),
ax=ax_quantile_returns_bar_by_group)
plt.show()
gf.close()
@plotting.customize
def create_information_tear_sheet(factor_data,
group_neutral=False,
by_group=False):
"""
Creates a tear sheet for information analysis of a factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_neutral : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, display graphs separately for each group.
"""
ic = perf.factor_information_coefficient(factor_data, group_neutral)
plotting.plot_information_table(ic)
columns_wide = 2
fr_cols = len(ic.columns)
rows_when_wide = (((fr_cols - 1) // columns_wide) + 1)
vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols
gf = GridFigure(rows=vertical_sections, cols=columns_wide)
ax_ic_ts = [gf.next_row() for _ in range(fr_cols)]
plotting.plot_ic_ts(ic, ax=ax_ic_ts)
ax_ic_hqq = [gf.next_cell() for _ in range(fr_cols * 2)]
plotting.plot_ic_hist(ic, ax=ax_ic_hqq[::2])
plotting.plot_ic_qq(ic, ax=ax_ic_hqq[1::2])
if not by_group:
mean_monthly_ic = \
perf.mean_information_coefficient(factor_data,
group_adjust=group_neutral,
by_group=False,
by_time="M")
ax_monthly_ic_heatmap = [gf.next_cell() for x in range(fr_cols)]
plotting.plot_monthly_ic_heatmap(mean_monthly_ic,
ax=ax_monthly_ic_heatmap)
if by_group:
mean_group_ic = \
perf.mean_information_coefficient(factor_data,
group_adjust=group_neutral,
by_group=True)
plotting.plot_ic_by_group(mean_group_ic, ax=gf.next_row())
plt.show()
gf.close()
@plotting.customize
def create_turnover_tear_sheet(factor_data, turnover_periods=None):
"""
Creates a tear sheet for analyzing the turnover properties of a factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
turnover_periods : sequence[string], optional
Periods to compute turnover analysis on. By default periods in
'factor_data' are used but custom periods can provided instead. This
can be useful when periods in 'factor_data' are not multiples of the
frequency at which factor values are computed i.e. the periods
are 2h and 4h and the factor is computed daily and so values like
['1D', '2D'] could be used instead
"""
if turnover_periods is None:
turnover_periods = utils.get_forward_returns_columns(
factor_data.columns)
quantile_factor = factor_data['factor_quantile']
quantile_turnover = \
{p: pd.concat([perf.quantile_turnover(quantile_factor, q, p)
for q in range(1, int(quantile_factor.max()) + 1)],
axis=1)
for p in turnover_periods}
autocorrelation = pd.concat(
[perf.factor_rank_autocorrelation(factor_data, period) for period in
turnover_periods], axis=1)
plotting.plot_turnover_table(autocorrelation, quantile_turnover)
fr_cols = len(turnover_periods)
columns_wide = 1
rows_when_wide = (((fr_cols - 1) // 1) + 1)
vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols
gf = GridFigure(rows=vertical_sections, cols=columns_wide)
for period in turnover_periods:
if quantile_turnover[period].isnull().all().all():
continue
plotting.plot_top_bottom_quantile_turnover(quantile_turnover[period],
period=period,
ax=gf.next_row())
for period in autocorrelation:
if autocorrelation[period].isnull().all():
continue
plotting.plot_factor_rank_auto_correlation(autocorrelation[period],
period=period,
ax=gf.next_row())
plt.show()
gf.close()
@plotting.customize
def create_full_tear_sheet(factor_data,
long_short=True,
group_neutral=False,
by_group=False):
"""
Creates a full tear sheet for analysis and evaluating single
return predicting (alpha) factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
group_neutral : bool
Should this computation happen on a group neutral portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
- See tears.create_information_tear_sheet for details on how this
flag affects information analysis
by_group : bool
If True, display graphs separately for each group.
"""
plotting.plot_quantile_statistics_table(factor_data)
create_returns_tear_sheet(factor_data,
long_short,
group_neutral,
by_group,
set_context=False)
create_information_tear_sheet(factor_data,
group_neutral,
by_group,
set_context=False)
create_turnover_tear_sheet(factor_data, set_context=False)
@plotting.customize
def create_event_returns_tear_sheet(factor_data,
prices,
avgretplot=(5, 15),
long_short=True,
group_neutral=False,
std_bar=True,
by_group=False):
"""
Creates a tear sheet to view the average cumulative returns for a
factor within a window (pre and post event).
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex Series indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, the factor
quantile/bin that factor value belongs to and (optionally) the group
the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
prices : pd.DataFrame
A DataFrame indexed by date with assets in the columns containing the
pricing data.
- See full explanation in utils.get_clean_factor_and_forward_returns
avgretplot: tuple (int, int) - (before, after)
If not None, plot quantile average cumulative returns
long_short : bool
Should this computation happen on a long short portfolio? if so then
factor returns will be demeaned across the factor universe
group_neutral : bool
Should this computation happen on a group neutral portfolio? if so,
returns demeaning will occur on the group level.
std_bar : boolean, optional
Show plots with standard deviation bars, one for each quantile
by_group : bool
If True, display graphs separately for each group.
"""
before, after = avgretplot
avg_cumulative_returns = \
perf.average_cumulative_return_by_quantile(
factor_data,
prices,
periods_before=before,
periods_after=after,
demeaned=long_short,
group_adjust=group_neutral)
num_quantiles = int(factor_data['factor_quantile'].max())
vertical_sections = 1
if std_bar:
vertical_sections += (((num_quantiles - 1) // 2) + 1)
cols = 2 if num_quantiles != 1 else 1
gf = GridFigure(rows=vertical_sections, cols=cols)
plotting.plot_quantile_average_cumulative_return(avg_cumulative_returns,
by_quantile=False,
std_bar=False,
ax=gf.next_row())
if std_bar:
ax_avg_cumulative_returns_by_q = [gf.next_cell()
for _ in range(num_quantiles)]
plotting.plot_quantile_average_cumulative_return(
avg_cumulative_returns,
by_quantile=True,
std_bar=True,
ax=ax_avg_cumulative_returns_by_q)
plt.show()
gf.close()
if by_group:
groups = factor_data['group'].unique()
num_groups = len(groups)
vertical_sections = ((num_groups - 1) // 2) + 1
gf = GridFigure(rows=vertical_sections, cols=2)
avg_cumret_by_group = \
perf.average_cumulative_return_by_quantile(
factor_data,
prices,
periods_before=before,
periods_after=after,
demeaned=long_short,
group_adjust=group_neutral,
by_group=True)
for group, avg_cumret in avg_cumret_by_group.groupby(level='group'):
avg_cumret.index = avg_cumret.index.droplevel('group')
plotting.plot_quantile_average_cumulative_return(
avg_cumret,
by_quantile=False,
std_bar=False,
title=group,
ax=gf.next_cell())
plt.show()
gf.close()
@plotting.customize
def create_event_study_tear_sheet(factor_data,
prices=None,
avgretplot=(5, 15),
rate_of_ret=True,
n_bars=50):
"""
Creates an event study tear sheet for analysis of a specific event.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single event, forward returns for each
period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
prices : pd.DataFrame, required only if 'avgretplot' is provided
A DataFrame indexed by date with assets in the columns containing the
pricing data.
- See full explanation in utils.get_clean_factor_and_forward_returns
avgretplot: tuple (int, int) - (before, after), optional
If not None, plot event style average cumulative returns within a
window (pre and post event).
rate_of_ret : bool, optional
Display rate of return instead of simple return in 'Mean Period Wise
Return By Factor Quantile' and 'Period Wise Return By Factor Quantile'
plots
n_bars : int, optional
Number of bars in event distribution plot
"""
long_short = False
plotting.plot_quantile_statistics_table(factor_data)
gf = GridFigure(rows=1, cols=1)
plotting.plot_events_distribution(events=factor_data['factor'],
num_bars=n_bars,
ax=gf.next_row())
plt.show()
gf.close()
if prices is not None and avgretplot is not None:
create_event_returns_tear_sheet(factor_data=factor_data,
prices=prices,
avgretplot=avgretplot,
long_short=long_short,
group_neutral=False,
std_bar=True,
by_group=False)
factor_returns = perf.factor_returns(factor_data,
demeaned=False,
equal_weight=True)
mean_quant_ret, std_quantile = \
perf.mean_return_by_quantile(factor_data,
by_group=False,
demeaned=long_short)
if rate_of_ret:
mean_quant_ret = \
mean_quant_ret.apply(utils.rate_of_return, axis=0,
base_period=mean_quant_ret.columns[0])
mean_quant_ret_bydate, std_quant_daily = \
perf.mean_return_by_quantile(factor_data,
by_date=True,
by_group=False,
demeaned=long_short)
if rate_of_ret:
mean_quant_ret_bydate = mean_quant_ret_bydate.apply(
utils.rate_of_return, axis=0,
base_period=mean_quant_ret_bydate.columns[0]
)
fr_cols = len(factor_returns.columns)
vertical_sections = 2 + fr_cols * 1
gf = GridFigure(rows=vertical_sections, cols=1)
plotting.plot_quantile_returns_bar(mean_quant_ret,
by_group=False,
ylim_percentiles=None,
ax=gf.next_row())
plotting.plot_quantile_returns_violin(mean_quant_ret_bydate,
ylim_percentiles=(1, 99),
ax=gf.next_row())
trading_calendar = factor_data.index.levels[0].freq
if trading_calendar is None:
trading_calendar = pd.tseries.offsets.BDay()
warnings.warn(
"'freq' not set in factor_data index: assuming business day",
UserWarning
)
for p in factor_returns:
plotting.plot_cumulative_returns(
factor_returns[p],
period=p,
freq=trading_calendar,
ax=gf.next_row()
)
plt.show()
gf.close()
| apache-2.0 |
google-research/FirstOrderLp.jl | scripts/analyze_csv_data.py | 1 | 29257 | # Copyright 2021 The FirstOrderLp Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates all the experimental results used in the paper.
# It requires python 3, numpy, pandas, and matplotlib installed to run.
#
# `python analyze_csv_data.py`
#
# It reads csv files containing experimental results from ./csv, and outputs
# pdf figures to ./results/figs and latex tables to ./results/tex.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from cycler import cycler
plt.rcParams.update({'figure.max_open_warning': 0, 'font.size': 16})
# This is required to generate plots that are easy to read when printed:
plt.rcParams['axes.prop_cycle'] = cycler(
linestyle=[
'-',
'--',
':',
'-.',
'-',
'--',
':',
'-.',
'-',
'--'],
color=[
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf'])
# directory where the csv files are located
CSV_DIR = './csv'
# directory where all the figure pdf and table tex files are written to:
OUTPUT_DIR = './results'
FIGS_DIR = os.path.join(OUTPUT_DIR, 'figs')
TEX_DIR = os.path.join(OUTPUT_DIR, 'tex')
OPT = 'TERMINATION_REASON_OPTIMAL'
KKT_PASSES_LIMIT = 1e5
TIME_LIMIT_SECS = 60 * 60 # 1hr
# shift to use for shifted geometric mean
SGM_SHIFT = int(10)
# penalised average runtime:
PAR = 1. # can be None, which removes unsolved instead of penalizing
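# Editorial note: PAR acts like a "penalized average runtime" factor for the
# aggregate tables below. With PAR = k, instances that did not reach
# TERMINATION_REASON_OPTIMAL are charged k * KKT_PASSES_LIMIT (or
# k * TIME_LIMIT_SECS) before the shifted geometric means are computed;
# with PAR = None they are dropped (treated as NaN) instead.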
# Which scaling experiments to present
SCALING_EXPS_TO_USE = [
'off,off',
'off,pock_chambolle alpha=1',
'10 rounds,off',
'10 rounds,pock_chambolle alpha=1',
]
# Which primal-weight experiments to present
PRIMALWEIGHT_EXPS_TO_USE = [
'adaptive',
#'Fixed 1e-0',
]
# placeholder:
_BEST_STR = '_best_str_'
_BEST_FIXED = '_best_fixed_'
# Dataset names:
MITTELMANN_STR = 'lp_benchmark'
MIPLIB_STR = 'mip_relaxations'
# Change table font size to fit paper:
LATEX_FONT_SIZE = '\\small'
# Naming for improvements plots:
_PDHG = 'PDHG'
_RESTARTS = '+ restarts'
_SCALING = '+ scaling'
_PRIMAL_WEIGHT = '+ primal\nweight'
_STEPSIZE = '+ step\nsize'
_PRESOLVE = '+ presolve\n(= PDLP)'
# Order in which improvements should appear:
IMPROVEMENTS_ORDER = [
_PDHG,
_RESTARTS,
_SCALING,
_PRIMAL_WEIGHT,
_STEPSIZE,
_PRESOLVE]
IMPROVEMENTS_ORDER_IDX = dict(
zip(IMPROVEMENTS_ORDER, range(len(IMPROVEMENTS_ORDER))))
# Horrible HACK, but needs to be done
def label_lookup(label):
if 'pdhg_enhanced' in label:
return 'PDLP'
if 'mirror-prox' in label:
return 'Enh. Extragradient'
if 'pdhg_vanilla' in label:
return 'PDHG'
if 'scs-indirect' in label:
return 'SCS (matrix-free)'
if 'scs-direct' in label:
return 'SCS'
if 'nopresolve' in label:
return 'No presolve'
if 'no restarts' in label:
return 'No restart'
if 'adaptive theoretical' in label:
return 'Adaptive restart (theory)'
if 'adaptive enhanced' in label:
return 'PDLP'
if 'pdhg' in label and 'pdhg_mp_1h' in label:
return 'PDLP'
if 'off,off' in label:
return 'No scaling'
if 'off,pock_chambolle alpha=1' in label:
return 'Pock-Chambolle'
if '10 rounds,off' in label:
return 'Ruiz'
if '10 rounds,pock_chambolle alpha=1' in label:
return 'Ruiz + Pock-Chambolle'
if 'stepsize' in label:
if 'adaptive' in label:
return 'PDLP'
if 'fixed' in label:
return 'Fixed step-size'
if 'scaling' in label:
if _BEST_STR in label:
return 'Best per-instance scaling'
if 'primalweight' in label:
if 'adaptive' in label:
return 'PDLP'
if 'Fixed 1e-0' in label:
return r'Fixed PW ($\theta=0$)'
if _BEST_STR in label:
return 'Best per-instance PW'
if _BEST_FIXED in label:
return 'Best fixed PW'
if 'improvements' in label:
if 'vanilla' in label:
return _PDHG
st = ''
if 'restarts' in label:
st = _RESTARTS
if 'scaling' in label:
st = _SCALING
if 'primal weight' in label:
st = _PRIMAL_WEIGHT
if 'step size' in label:
st = _STEPSIZE
if 'pdlp_final' in label:
st = _PRESOLVE
return st
if 'malitskypock' in label:
if _BEST_STR in label:
return 'Best per-instance MP settings'
return 'Best fixed MP setting'
return label
def sanitize_title(title):
title = title.replace('_', ' ').title()
title = title.replace('Lp', 'LP')
title = title.replace('Mip', 'MIP')
title = title.replace('Pdlp', 'PDLP')
title = title.replace('Pdhg', 'PDHG')
title = title.replace('Scs', 'SCS')
title = title.replace('Sgm', 'SGM')
return title
# Generate plots of xaxis vs fraction of solved problems
def solved_problems_vs_xaxis_figs(
dfs,
xaxis,
xlabel,
prefix,
num_instances,
outer_legend=False):
plt.figure()
stats_dfs = {}
for k, df_k in dfs.items():
stats_df = df_k.groupby(xaxis)[xaxis] \
.agg('count') \
.pipe(pd.DataFrame) \
.rename(columns={xaxis: 'frequency'})
stats_df['cum_solved_count'] = stats_df['frequency'].cumsum() / \
num_instances
stats_df = stats_df.drop(columns='frequency').reset_index()
stats_dfs[k] = stats_df
max_xaxis = pd.concat(stats_dfs)[xaxis].max()
for k, df_k in stats_dfs.items():
if df_k.empty:
continue
df_k = df_k.append({xaxis: max_xaxis,
'cum_solved_count': df_k.iloc[-1]['cum_solved_count']},
ignore_index=True)
df_k.reset_index()
plt.plot(df_k[xaxis],
df_k['cum_solved_count'],
label=label_lookup(k))
plt.ylabel('Fraction of problems solved')
plt.xlabel(xlabel)
plt.ylim((0, 1))
plt.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
plt.title(sanitize_title(prefix))
if outer_legend:
plt.legend(bbox_to_anchor=(1.04, 0.5), loc='center left')
else:
plt.legend(loc='best')
path = os.path.join(FIGS_DIR, f'{prefix}_{xaxis}_v_solved_probs.pdf')
plt.savefig(
path,
bbox_inches="tight")
def gen_solved_problems_plots(df, prefix, num_instances, outer_legend=False):
exps = df['experiment_label'].unique()
dfs = {k: df[df['experiment_label'] == k] for k in exps}
optimal_dfs = {k: v[v['termination_reason'] == OPT]
for (k, v) in dfs.items()}
solved_problems_vs_xaxis_figs(
optimal_dfs,
'cumulative_kkt_matrix_passes',
f'KKT matrix passes SGM{SGM_SHIFT}',
prefix,
num_instances,
outer_legend)
solved_problems_vs_xaxis_figs(
optimal_dfs,
'solve_time_sec',
'Wall-clock time (secs)',
prefix,
num_instances,
outer_legend)
def gen_solved_problems_plots_split_tol(
df, prefix, num_instances, outer_legend=False):
tols = df['tolerance'].unique()
for t in tols:
gen_solved_problems_plots(
df[df['tolerance'] == t], prefix + f'_tol_{t:.0E}', num_instances, outer_legend)
def shifted_geomean(x, shift):
x = x[~np.isnan(x)]
sgm = np.exp(np.sum(np.log(x + shift) / len(x))) - shift
return sgm if sgm > 0 else np.nan
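# Editorial example (sketch): with shift = 10 the shifted geometric mean of
# [1, 100] is exp(mean(log([11, 110]))) - 10 ~= 24.8, whereas the plain
# geometric mean is 10; the shift damps the influence of very small values.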
def change_table_font_size(table):
table = table.replace(
'\\begin{table}\n',
'\\begin{table}\n' +
LATEX_FONT_SIZE +
'\n')
table = table.replace('\\caption{', '\\caption{' + LATEX_FONT_SIZE + ' ')
return table
def gen_total_solved_problems_table(df, prefix, par):
solved_probs = df[df['termination_reason'] == OPT] \
.groupby('experiment_label')['experiment_label'] \
.agg('count') \
.pipe(pd.DataFrame) \
.rename(columns={'experiment_label': 'Solved count'})
solved_probs.index.name = 'Experiment'
solved_probs = solved_probs.reset_index()
shift = SGM_SHIFT
kkt_sgm = df.copy()
if par is not None:
kkt_sgm.loc[kkt_sgm['termination_reason'] != OPT,
'cumulative_kkt_matrix_passes'] = par * KKT_PASSES_LIMIT
else:
kkt_sgm.loc[kkt_sgm['termination_reason'] !=
OPT, 'cumulative_kkt_matrix_passes'] = np.nan
# Hack for SCS direct
kkt_sgm.loc[kkt_sgm['experiment_label'].str.contains(
'scs-direct'), 'cumulative_kkt_matrix_passes'] = np.nan
kkt_sgm = kkt_sgm.groupby('experiment_label')['cumulative_kkt_matrix_passes'] \
.agg(lambda _: shifted_geomean(_, shift)) \
.pipe(pd.DataFrame) \
.rename(columns={'cumulative_kkt_matrix_passes':
f'KKT passes SGM{shift}'})
kkt_sgm.index.name = 'Experiment'
kkt_sgm = kkt_sgm.reset_index()
wall_clock = df.copy()
if par is not None:
wall_clock.loc[wall_clock['termination_reason'] !=
OPT, 'solve_time_sec'] = par * TIME_LIMIT_SECS
else:
wall_clock.loc[wall_clock['termination_reason']
!= OPT, 'solve_time_sec'] = np.nan
wall_clock = wall_clock.groupby('experiment_label')['solve_time_sec'] \
.agg(lambda _: shifted_geomean(_, shift)) \
.pipe(pd.DataFrame) \
.rename(columns={'solve_time_sec': f'Solve time secs SGM10'})
wall_clock.index.name = 'Experiment'
wall_clock = wall_clock.reset_index()
output = solved_probs.merge(kkt_sgm).merge(wall_clock)
# rename the labels
for e in output['Experiment']:
output.loc[output['Experiment'] == e, 'Experiment'] = label_lookup(e)
output = output.sort_values('Solved count', ascending=True)
# HACK to fix improvements table ordering and line break
if 'improvements' in prefix:
output['rank'] = output['Experiment'].map(IMPROVEMENTS_ORDER_IDX)
output.sort_values('rank', inplace=True)
output.drop('rank', 1, inplace=True)
to_write = output.copy()
for e in to_write['Experiment']:
to_write.loc[to_write['Experiment'] == e,
'Experiment'] = e.replace('\n', ' ')
else:
to_write = output
table = to_write.to_latex(
float_format="%.1f",
longtable=False,
index=False,
caption=f'Performance statistics: {sanitize_title(prefix)}',
label=f't:solved-probs-{prefix}',
column_format='lccc',
escape=False,
na_rep='-')
table = change_table_font_size(table)
path = os.path.join(TEX_DIR, f'{prefix}_solved_probs_table.tex')
with open(path, "w") as f:
f.write(table)
return output
def gen_total_solved_problems_table_split_tol(df, prefix, par):
outputs = {}
tols = df['tolerance'].unique()
for t in tols:
outputs[t] = gen_total_solved_problems_table(
df[df['tolerance'] == t], prefix + f'_tol_{t:.0E}', par)
return outputs
def plot_loghist(x, nbins):
x = x[~np.isnan(x)]
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(np.log10(bins[0] + 1e-6), np.log10(bins[-1]), nbins)
plt.hist(x, bins=logbins)
plt.xscale('log')
def gen_ratio_histograms_split_tol(df, prefix, par):
tols = df['tolerance'].unique()
for t in tols:
gen_ratio_histograms(df[df['tolerance'] == t],
prefix + f'_tol_{t:.0E}',
'cumulative_kkt_matrix_passes',
f'KKT matrix passes SGM{SGM_SHIFT}',
KKT_PASSES_LIMIT,
par)
gen_ratio_histograms(df[df['tolerance'] == t],
prefix + f'_tol_{t:.0E}',
'solve_time_sec',
'Wall-clock time (secs)',
TIME_LIMIT_SECS,
par)
def gen_ratio_histograms(df, prefix, xaxis, xlabel, limit, par):
assert len(df['experiment_label'].unique()) == 2
(l0, l1) = df['experiment_label'].unique()
def performance_ratio_fn(df, par):
df = df.reset_index()
assert len(df) <= 2
df0 = df[df['experiment_label'] == l0]
df1 = df[df['experiment_label'] == l1]
instance = df.instance_name.unique()
if len(df0) == 1 and df0['termination_reason'].iloc[0] == OPT:
kkt_passes_0 = df0[xaxis].iloc[0]
else:
kkt_passes_0 = par * limit
if len(df1) == 1 and df1['termination_reason'].iloc[0] == OPT:
kkt_passes_1 = df1[xaxis].iloc[0]
else:
kkt_passes_1 = par * limit
# if (df['termination_reason'] != OPT).any():
# return np.nan
return (kkt_passes_0 / kkt_passes_1)
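    # Editorial note: the ratio compares the two experiment labels per
    # instance as value(l0) / value(l1); runs that did not terminate
    # optimally are charged par * limit, so a ratio below 1 means the
    # first-listed experiment needed fewer KKT passes (or less time).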
ratios = df.groupby(['instance_name']) \
.apply(lambda _: performance_ratio_fn(_, par)) \
.reset_index(name='ratio')
plt.figure(figsize=(10, 6))
plt.title(sanitize_title(
f'{prefix} {xlabel} {label_lookup(l0)}:{label_lookup(l1)}'))
plot_loghist(ratios['ratio'], min(len(ratios) // 3, 25))
path = os.path.join(
FIGS_DIR,
f'{prefix}_{label_lookup(l0)}_{label_lookup(l1)}_{xaxis}_performance_ratio.pdf')
plt.savefig(path)
table = ratios.to_latex(float_format="%.2f",
longtable=False,
index=False,
caption=f'Performance ratio.',
label=f't:ratio-{prefix}',
column_format='lc',
na_rep='-')
table = change_table_font_size(table)
path = os.path.join(TEX_DIR, f'{prefix}_{label_lookup(l0)}:'
f'{label_lookup(l1)}_{xaxis}_ratio_table.tex')
with open(path, "w") as f:
f.write(table)
# Unsolved problems might be missing from csv, make sure all are accounted for.
def fill_in_missing_problems(df, instances_list):
new_index = pd.Index(instances_list, name='instance_name')
experiments = df['experiment_label'].unique()
dfs = []
for e in experiments:
old_df = df[df['experiment_label'] == e]
tol = old_df['tolerance'].unique()[0]
new_df = old_df.set_index('instance_name').reindex(
new_index).reset_index()
# otherwise these would be nan
new_df['tolerance'] = tol
new_df['experiment_label'] = e
dfs.append(new_df)
return pd.concat(dfs)
def improvements_plot(dfs, prefix, key, ascending):
normalized_dfs = []
for df in dfs:
df[key] /= df[df['Experiment'] == 'PDHG'][key].to_numpy()[0]
normalized_dfs.append(df)
df = pd.concat(normalized_dfs)
fig = plt.figure(figsize=(10, 6))
for tol in df['tolerance'].unique():
_df = df[df['tolerance'] == tol].reset_index(drop=True)
plt.plot(
_df[key].to_numpy(),
linestyle='--',
marker='o',
label=f'tolerance {tol:.0E}')
plt.yscale('log')
plt.ylabel('Normalized ' + key)
plt.title(sanitize_title(prefix))
plt.xticks(range(len(_df['Experiment'])), _df['Experiment'].to_list())
ax = plt.gca()
ax.yaxis.set_major_locator(ticker.LogLocator(subs=[1, 2, 3, 5, 7]))
ax.yaxis.set_major_formatter(
ticker.LogFormatterSciNotation(
labelOnlyBase=False,
minor_thresholds=(4, 2)))
# ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.2f")
if len(dfs) > 1:
plt.legend(loc='best')
name = key.replace(' ', '_')
path = os.path.join(FIGS_DIR, f'{prefix}_{name}.pdf')
plt.savefig(
path,
bbox_inches="tight")
def gen_all_improvement_plots(outputs, prefix):
dfs = []
for tol, df in outputs.items():
df = df.copy()
df['tolerance'] = tol
dfs.append(df)
improvements_plot(
dfs,
prefix,
'KKT passes SGM10',
ascending=False)
improvements_plot(
dfs,
prefix,
'Solve time secs SGM10',
ascending=False)
improvements_plot(
dfs,
prefix,
'Solved count',
ascending=True)
# First, make output directories
if not os.path.exists(FIGS_DIR):
os.makedirs(FIGS_DIR)
if not os.path.exists(TEX_DIR):
os.makedirs(TEX_DIR)
# Get clean list of all problems we tested on:
with open('../benchmarking/mip_relaxations_instance_list') as f:
miplib_instances = f.readlines()
miplib_instances = [p.strip() for p in miplib_instances if p[0] != '#']
with open('../benchmarking/lp_benchmark_instance_list') as f:
mittelmann_instances = f.readlines()
mittelmann_instances = [p.strip() for p in mittelmann_instances if p[0] != '#']
# Pull out 'default' (ie best) pdhg implementation to compare against:
df_default = pd.read_csv(
os.path.join(
CSV_DIR,
'miplib_pdhg_enhanced_100k.csv'))
df_default = fill_in_missing_problems(df_default, miplib_instances)
######################################################################
# bisco pdhg vs vanilla pdhg (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_pdhg_vanilla_100k.csv'))
df = fill_in_missing_problems(df, miplib_instances)
df = pd.concat((df_default, df))
gen_solved_problems_plots_split_tol(df, f'{MIPLIB_STR}', len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f'{MIPLIB_STR}', PAR)
gen_ratio_histograms_split_tol(df, f'{MIPLIB_STR}', PAR)
######################################################################
df = pd.read_csv(os.path.join(CSV_DIR, 'mittelmann_pdhg_enhanced_100k.csv'))
df = fill_in_missing_problems(df, mittelmann_instances)
df_vanilla = pd.read_csv(
os.path.join(
CSV_DIR,
'mittelmann_improvements_100k.csv'))
df_vanilla = df_vanilla[df_vanilla['enhancements'] == 'vanilla']
df_vanilla = fill_in_missing_problems(df_vanilla, mittelmann_instances)
df = pd.concat((df, df_vanilla))
gen_solved_problems_plots_split_tol(
df, f'{MITTELMANN_STR}', len(mittelmann_instances))
gen_total_solved_problems_table_split_tol(df, f'{MITTELMANN_STR}', PAR)
gen_ratio_histograms_split_tol(df, f'{MITTELMANN_STR}', PAR)
######################################################################
# Scaling results (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_malitskypock_100k.csv'))
mp_solved = df[df['termination_reason'] == OPT] \
.groupby(['experiment_label', 'tolerance'])['experiment_label'] \
.agg('count') \
.pipe(pd.DataFrame) \
.rename(columns={'experiment_label': 'solved'}) \
.reset_index()
dfs = []
for t in df['tolerance'].unique():
_df = mp_solved[mp_solved['tolerance'] == t]
best_mp_run = _df.loc[_df['solved'].idxmax()]['experiment_label']
dfs.append(df[df['experiment_label'] == best_mp_run])
df_best_ind = fill_in_missing_problems(pd.concat(dfs), miplib_instances)
# Pull out best performing scaling for each instance / tolerance:
df_best_fixed = df[df['termination_reason'] == OPT].reset_index()
best_idxs = df_best_fixed.groupby(['instance_name', 'tolerance'])[
'cumulative_kkt_matrix_passes'].idxmin()
df_best_fixed = df_best_fixed.loc[best_idxs]
for t in df_best_fixed['tolerance'].unique():
# rename the experiment label
df_best_fixed.loc[df_best_fixed['tolerance'] == t, 'experiment_label'] = \
f'malitskypock {_BEST_STR} {t}'
df_best_fixed = fill_in_missing_problems(df_best_fixed, miplib_instances)
df_stepsize = pd.read_csv(os.path.join(CSV_DIR, 'miplib_stepsize_100k.csv'))
df_stepsize = fill_in_missing_problems(df_stepsize, miplib_instances)
df = pd.concat((df_stepsize, df_best_fixed, df_best_ind))
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_stepsize', len(miplib_instances), False)
gen_total_solved_problems_table_split_tol(df, f'{MIPLIB_STR}_stepsize', PAR)
######################################################################
# bisco vs mp vs scs on MIPLIB (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, 'miplib_pdhg_mp_1h.csv'))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, miplib_instances)
df_scs = pd.read_csv(os.path.join(CSV_DIR, 'miplib_scs_1h.csv'))
df_scs = fill_in_missing_problems(df_scs, miplib_instances)
df_pdhg_vanilla = pd.read_csv(os.path.join(
CSV_DIR, 'miplib_pdhg_vanilla_1h.csv'))
df_pdhg_vanilla = fill_in_missing_problems(df_pdhg_vanilla, miplib_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_baselines', len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f'{MIPLIB_STR}_baselines', PAR)
df_pdhg_scs_dir = pd.concat(
(df_pdhg_mp[df_pdhg_mp['method'] == 'pdhg'], df_scs[df_scs['method'] == 'scs-direct']))
df_pdhg_scs_indir = pd.concat(
(df_pdhg_mp[df_pdhg_mp['method'] == 'pdhg'], df_scs[df_scs['method'] == 'scs-indirect']))
gen_ratio_histograms_split_tol(df_pdhg_mp, f'{MIPLIB_STR}', PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_indir, f'{MIPLIB_STR}', PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_dir, f'{MIPLIB_STR}', PAR)
######################################################################
# bisco vs mp vs scs on MITTELMANN (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, 'mittelmann_pdhg_mp_1h.csv'))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, mittelmann_instances)
df_pdhg_vanilla = pd.read_csv(os.path.join(
CSV_DIR, 'mittelmann_pdhg_vanilla_1h.csv'))
df_pdhg_vanilla = fill_in_missing_problems(df_pdhg_vanilla, mittelmann_instances)
df_scs = pd.read_csv(os.path.join(CSV_DIR, 'mittelmann_scs_1h.csv'))
df_scs = fill_in_missing_problems(df_scs, mittelmann_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
gen_solved_problems_plots_split_tol(
df,
f'{MITTELMANN_STR}_baselines',
len(mittelmann_instances))
gen_total_solved_problems_table_split_tol(
df, f'{MITTELMANN_STR}_baselines', PAR)
df_pdhg_scs_dir = pd.concat(
(df_pdhg_mp[df_pdhg_mp['method'] == 'pdhg'], df_scs[df_scs['method'] == 'scs-direct']))
df_pdhg_scs_indir = pd.concat(
(df_pdhg_mp[df_pdhg_mp['method'] == 'pdhg'], df_scs[df_scs['method'] == 'scs-indirect']))
gen_ratio_histograms_split_tol(df_pdhg_mp, f'{MITTELMANN_STR}', PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_indir, f'{MITTELMANN_STR}', PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_dir, f'{MITTELMANN_STR}', PAR)
######################################################################
# bisco presolve vs no presolve (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_nopresolve_100k.csv'))
df = pd.concat((df_default, df))
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_presolve', len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f'{MIPLIB_STR}_presolve', PAR)
######################################################################
# bisco scaling vs no scaling (NO JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_scaling_100k.csv'))
df = fill_in_missing_problems(df, miplib_instances)
# Pull out best performing scaling for each instance / tolerance:
df_best_per = df[df['termination_reason'] == OPT].reset_index()
best_idxs = df_best_per.groupby(['instance_name', 'tolerance'])[
'cumulative_kkt_matrix_passes'].idxmin()
df_best_per = df_best_per.loc[best_idxs]
for t in df_best_per['tolerance'].unique():
# rename the experiment label
df_best_per.loc[df_best_per['tolerance'] == t, 'experiment_label'] = \
f'scaling {_BEST_STR} {t}'
df_best_per = fill_in_missing_problems(df_best_per, miplib_instances)
# filter out un-needed scaling experiments:
df = pd.concat(df[df['experiment_label'].str.contains(e)]
for e in SCALING_EXPS_TO_USE)
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_scaling', len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f'{MIPLIB_STR}_scaling', PAR)
df = pd.concat((df, df_best_per))
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_scaling_with_best_per', len(miplib_instances))
gen_total_solved_problems_table_split_tol(
df, f'{MIPLIB_STR}_scaling_with_best_per', PAR)
######################################################################
# bisco restart vs no restart (NO JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_restarts_100k.csv'))
df = fill_in_missing_problems(df, miplib_instances)
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_restarts', len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f'{MIPLIB_STR}_restarts', PAR)
######################################################################
# bisco primalweight (NO JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_primalweight_100k.csv'))
df = fill_in_missing_problems(df, miplib_instances)
df_fixed = df[df['experiment_label'].str.contains('Fixed')]
pw_solved = df_fixed[df_fixed['termination_reason'] == OPT] \
.groupby(['experiment_label', 'tolerance'])['experiment_label'] \
.agg('count') \
.pipe(pd.DataFrame) \
.rename(columns={'experiment_label': 'solved'}) \
.reset_index()
dfs = []
for t in df_fixed['tolerance'].unique():
_df = pw_solved[pw_solved['tolerance'] == t]
best_mp_run = _df.loc[_df['solved'].idxmax()]['experiment_label']
dfs.append(df_fixed[df_fixed['experiment_label'] == best_mp_run])
df_best_ind = fill_in_missing_problems(pd.concat(dfs), miplib_instances)
for t in df_best_ind['tolerance'].unique():
# rename the experiment label
df_best_ind.loc[df_best_ind['tolerance'] == t, 'experiment_label'] = \
f'primalweight {_BEST_FIXED} {t}'
# Pull out best performing fixed weight for each instance / tolerance:
df_best_fixed = df_fixed[df_fixed['termination_reason'] == OPT].reset_index()
best_idxs = df_best_fixed.groupby(['instance_name', 'tolerance'])[
'cumulative_kkt_matrix_passes'].idxmin()
df_best_fixed = df_best_fixed.loc[best_idxs]
for t in df_best_fixed['tolerance'].unique():
# rename the experiment label
df_best_fixed.loc[df_best_fixed['tolerance'] == t, 'experiment_label'] = \
f'primalweight {_BEST_STR} {t}'
df_best_fixed = fill_in_missing_problems(df_best_fixed, miplib_instances)
df = pd.concat(df[df['experiment_label'].str.contains(e)]
for e in PRIMALWEIGHT_EXPS_TO_USE)
df = pd.concat((df, df_best_fixed, df_best_ind))
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_primalweight', len(miplib_instances), False)
gen_total_solved_problems_table_split_tol(
df, f'{MIPLIB_STR}_primalweight', PAR)
######################################################################
# MIPLIB bisco ablate improvements (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, 'miplib_improvements_100k.csv'))
df_pdlp = df_default.copy()
for t in df_pdlp['tolerance'].unique():
df_pdlp.loc[df_pdlp['tolerance'] == t,
'experiment_label'] = f'pdlp_final_improvements_{t}'
df = pd.concat((df, df_pdlp.reset_index()))
df = fill_in_missing_problems(df, miplib_instances)
gen_solved_problems_plots_split_tol(
df, f'{MIPLIB_STR}_improvements', len(miplib_instances), True)
outputs = gen_total_solved_problems_table_split_tol(
df, f'{MIPLIB_STR}_improvements', PAR)
gen_all_improvement_plots(outputs, f'{MIPLIB_STR}_improvements')
######################################################################
# MITTELMAN bisco ablate improvements (JOIN DEFAULT)
df_default_mittelmann = pd.read_csv(
os.path.join(
CSV_DIR,
'mittelmann_pdhg_enhanced_100k.csv'))
df_default_mittelmann = fill_in_missing_problems(
df_default_mittelmann, mittelmann_instances)
df = pd.read_csv(os.path.join(CSV_DIR, 'mittelmann_improvements_100k.csv'))
df_pdlp = df_default_mittelmann.copy()
for t in df_pdlp['tolerance'].unique():
df_pdlp.loc[df_pdlp['tolerance'] == t,
'experiment_label'] = f'pdlp_final_improvements_{t}'
df = pd.concat((df, df_pdlp.reset_index()))
df = fill_in_missing_problems(df, mittelmann_instances)
gen_solved_problems_plots_split_tol(
df,
f'{MITTELMANN_STR}_improvements',
len(mittelmann_instances),
True)
outputs = gen_total_solved_problems_table_split_tol(
df, f'{MITTELMANN_STR}_improvements', PAR)
for df in outputs.values():
df['rank'] = df['Experiment'].map(IMPROVEMENTS_ORDER_IDX)
df.sort_values('rank', inplace=True)
df.drop('rank', 1, inplace=True)
gen_all_improvement_plots(outputs, f'{MITTELMANN_STR}_improvements')
| apache-2.0 |
astroJeff/dart_board | paper/scripts/J0513_evidence.py | 1 | 2610 | import sys
import numpy as np
import time
import matplotlib
matplotlib.use('Agg')
sys.path.append("../pyBSE/")
import pybse
import dart_board
from dart_board import sf_history
LMC_metallicity = 0.008
# Load the star formation history
sf_history.lmc.load_sf_history()
def lmc_sfh_J0513(ra, dec, ln_t_b):
""" Star formation history to guarantee walkers stay near J0513. """
ra_J0513 = 78.36775
dec_J0513 = -65.7885278
# Restrict size of viable region to within 2 degrees of J0513
if np.abs(ra - ra_J0513)*np.cos(dec*np.pi/180.0) > 2.0: return -np.inf
if np.abs(dec - dec_J0513) > 2.0: return -np.inf
return sf_history.lmc.prior_lmc(ra, dec, ln_t_b)
# Values for Swift J0513.4-6547 from Coe et al. 2015, MNRAS, 447, 1630
pub = dart_board.DartBoard("NSHMXB", evolve_binary=pybse.evolve, metallicity=LMC_metallicity,
ln_prior_pos=lmc_sfh_J0513,
nwalkers=320, threads=20, thin=10)
pub.aim_darts(N_iterations=10000)
start_time = time.time()
pub.throw_darts(nburn=2, nsteps=150000)
print("Simulation took",time.time()-start_time,"seconds.")
# Since emcee_PT does not have a blobs function, we must include the following calculation
if pub.ntemps is not None:
print("Generating derived values...")
ntemps, nchains, nsteps, nvar = pub.chains.shape
pub.derived = np.zeros(shape=(ntemps, nchains, nsteps, 9))
for i in range(ntemps):
for j in range(nchains):
for k in range(nsteps):
x_i = pub.chains[i,j,k]
ln_M1, ln_M2, ln_a, ecc, v_kick_1, theta_kick_1, phi_kick_1, ra, dec, ln_t = x_i
M1 = np.exp(ln_M1)
M2 = np.exp(ln_M2)
a = np.exp(ln_a)
time = np.exp(ln_t)
P_orb = dart_board.posterior.A_to_P(M1, M2, a)
output = pybse.evolve(M1, M2, P_orb, ecc, v_kick_1, theta_kick_1, phi_kick_1,
v_kick_1, theta_kick_1, phi_kick_1,
time, LMC_metallicity, False)
pub.derived[i,j,k] = np.array([output])
print("...finished.")
# Acceptance fraction
print("Acceptance fractions:",pub.sampler.acceptance_fraction)
# Autocorrelation length
try:
print("Autocorrelation length:", pub.sample.acor)
except:
print("Acceptance fraction is too low.")
# Save outputs
np.save("../data/J0513_evidence_chain.npy", pub.chains)
np.save("../data/J0513_evidence_derived.npy", pub.derived)
np.save("../data/J0513_evidence_lnprobability.npy", pub.lnprobability)
| mit |
kcavagnolo/astroML | book_figures/chapter6/fig_density_estimation.py | 3 | 4407 | """
Comparison of 1D Density Estimators
-----------------------------------
Figure 6.5
A comparison of different density estimation methods for two simulated
one-dimensional data sets (cf. figure 5.21). The generating distribution is
the same in both cases and is shown as the dotted line; the samples include 500
(top panel) and 5000 (bottom panel) data points (illustrated by vertical bars
at the bottom of each panel). Density estimators are Bayesian blocks
(Section 5.7.2), KDE (Section 6.1.1) and the nearest-neighbor method (eq. 6.15).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from astroML.density_estimation import KNeighborsDensity
from astroML.plotting import hist
# Scikit-learn 0.14 added sklearn.neighbors.KernelDensity, which is a very
# fast kernel density estimator based on a KD Tree. We'll use this if
# available (and raise a warning if it isn't).
try:
from sklearn.neighbors import KernelDensity
use_sklearn_KDE = True
except:
import warnings
warnings.warn("KDE will be removed in astroML version 0.3. Please "
"upgrade to scikit-learn 0.14+ and use "
"sklearn.neighbors.KernelDensity.", DeprecationWarning)
from astroML.density_estimation import KDE
use_sklearn_KDE = False
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate our data: a mix of several Cauchy distributions
# this is the same data used in the Bayesian Blocks figure
np.random.seed(0)
N = 10000
mu_gamma_f = [(5, 1.0, 0.1),
(7, 0.5, 0.5),
(9, 0.1, 0.1),
(12, 0.5, 0.2),
(14, 1.0, 0.1)]
true_pdf = lambda x: sum([f * stats.cauchy(mu, gamma).pdf(x)
for (mu, gamma, f) in mu_gamma_f])
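# Editorial note: the generating density is the mixture
#   p(x) = sum_i f_i * Cauchy(x; mu_i, gamma_i)
# over the five (mu, gamma, f) triples above; the weights f sum to 1.0.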
x = np.concatenate([stats.cauchy(mu, gamma).rvs(int(f * N))
for (mu, gamma, f) in mu_gamma_f])
np.random.shuffle(x)
x = x[x > -10]
x = x[x < 30]
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.95, hspace=0.1)
N_values = (500, 5000)
subplots = (211, 212)
k_values = (10, 100)
for N, k, subplot in zip(N_values, k_values, subplots):
ax = fig.add_subplot(subplot)
xN = x[:N]
t = np.linspace(-10, 30, 1000)
# Compute density with KDE
if use_sklearn_KDE:
kde = KernelDensity(0.1, kernel='gaussian')
kde.fit(xN[:, None])
dens_kde = np.exp(kde.score_samples(t[:, None]))
else:
kde = KDE('gaussian', h=0.1).fit(xN[:, None])
dens_kde = kde.eval(t[:, None]) / N
# Compute density with Bayesian nearest neighbors
nbrs = KNeighborsDensity('bayesian', n_neighbors=k).fit(xN[:, None])
dens_nbrs = nbrs.eval(t[:, None]) / N
# plot the results
ax.plot(t, true_pdf(t), ':', color='black', zorder=3,
label="Generating Distribution")
ax.plot(xN, -0.005 * np.ones(len(xN)), '|k')
hist(xN, bins='blocks', ax=ax, normed=True, zorder=1,
histtype='stepfilled', color='k', alpha=0.2,
label="Bayesian Blocks")
ax.plot(t, dens_nbrs, '-', lw=1.5, color='gray', zorder=2,
label="Nearest Neighbors (k=%i)" % k)
ax.plot(t, dens_kde, '-', color='black', zorder=3,
label="Kernel Density (h=0.1)")
# label the plot
ax.text(0.02, 0.95, "%i points" % N, ha='left', va='top',
transform=ax.transAxes)
ax.set_ylabel('$p(x)$')
ax.legend(loc='upper right')
if subplot == 212:
ax.set_xlabel('$x$')
ax.set_xlim(0, 20)
ax.set_ylim(-0.01, 0.4001)
plt.show()
| bsd-2-clause |
nmayorov/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities=
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
rjl09c/ysp2017 | katiecodeorderverification.py | 1 | 9000 | import yt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pylab
from yt.analysis_modules.halo_finding.api import HaloFinder
from pylab import*
from numpy import ma
from numpy import linalg as LA
#deriveswith respect to x
def derivx(vel,xcoords):
distance = xcoords[1][0] + xcoords[0][1] - 2*xcoords[0][0]
velxdx = np.zeros((200,200))
for i in range(len(vel)):
for x in range(len(vel)):
if 0 < i < len(vel) - 1:
velxdx[i,x] = ((-1/2) * vel[i-1][x]) + ((1/2) * vel[i+1][x])
elif i == 0:
velxdx[i,x] = (((-3/2) * vel[i][x]) + (2 * vel[i+1][x]) + ((-1/2) * vel[i+2][x]))
elif i == len(vel) - 1:
velxdx[i,x] = ((-3/2) * vel[i][x]) + (2 * vel[i-1][x]) + ((-1/2) * vel[i-2][x])
return velxdx/distance
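# Editorial note: in the interior this is the second-order central difference
# (vel[i+1] - vel[i-1]) / (2*h), with h = `distance` and the 1/2 factors carried
# in the coefficients; the first index uses the one-sided stencil (-3/2, 2, -1/2)/h.
# The usual backward stencil at the last index is (3/2, -2, 1/2)/h, so the formula
# above appears to carry the opposite sign there (left as in the original; the
# same applies to derivy below).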
#derives vel with respect to y
def derivy(vel,xcoords):
distance = xcoords[1][0] + xcoords[0][1] - 2*xcoords[0][0]
velydy = np.zeros((200,200))
for i in range(len(vel)):
for x in range(len(vel)):
if 0 < x < len(vel) - 1:
velydy[i,x] = (((-1/2) * vel[i][x-1]) + ((1/2) * vel[i][x+1]))
elif x == 0:
velydy[i,x] = (((-3/2)*vel[i][x]) + (2*vel[i][x+1]) + ((-1/2) * vel[i][x + 2]))
elif x == len(vel) - 1:
velydy[i,x] = (((-3/2)*vel[i][x]) + (2*vel[i][x-1]) + ((-1/2) * vel[i][x-2]))
return velydy/distance
#error norm: average absolute difference between two grids (100x100 version)
def normFile1(ycoords, velx, velx1):
e1 = 0
norm = np.zeros((100,100))
for i in range(len(ycoords)):
for j in range(len(ycoords)):
norm [i][j] = (abs(float(velx[i][j])-float(velx1[i][j])))
e1 = e1 + (abs((velx[i][j])-float(velx1[i][j])))
e1 = (e1/len(velx1))
#return norm
return e1
#error norm: average absolute difference between two grids (200x200 version)
def normFile2(ycoords, velx, velx1):
e2 = 0
norm = np.zeros((200,200))
for i in range(len(ycoords)):
for j in range(len(ycoords)):
norm [i][j] = (abs(float(velx[i][j])-float(velx1[i][j])))
e2 = e2 + (abs(float(velx[i][j])-float(velx1[i][j])))
e2 = (e2/len(velx1))
#return norm
return e2
#second derivative of vel with respect to x
def deriv2x(vel,xcoords):
distance = xcoords[1][0] - xcoords[0][0]
velxdx = np.zeros((100,100))
for i in range(len(vel)):
for x in range(len(vel)):
if 0 < i < len(vel) - 1:
velxdx[i,x] = (vel[i-1][x]) + (-2 * vel[i][x]) + (vel[i+1][x])
elif i == 0:
velxdx[i,x] = ((2 * vel[i][x]) + (-5 * vel[i+1][x]) + (4* vel[i+2][x]) + (-1 * vel[i+3][x]))
elif i == len(vel) - 1:
velxdx[i,x] = ((-3/2) * vel[i][x]) + (2 * vel[i-1][x]) + ((-1/2) * vel[i-2][x])
return velxdx/distance
#second derivative of vel with respect to y
def deriv2y(vel,xcoords):
distance = xcoords[1][0] - xcoords[0][0]
velydy = np.zeros((100,100))
for i in range(len(vel)):
for x in range(len(vel)):
if 0 < x < len(vel) - 1:
velydy[i,x] = ((vel[i][x-1]) + (-2 * vel[i][x]) + (vel[i][x+1]))
elif x == 0:
velydy[i,x] = (((2)*vel[i][x]) + (-5 * vel[i][x+1]) + ((4) * vel[i][x+2]) + (-1 * vel[i][x+3]))
elif x == len(vel) - 1:
velydy[i,x] = (((2) * vel[i][x]) + (-5 * vel[i][x - 1]) + ((4) * vel[i][x-2]) + (-1 * vel[i][x-3]))
return velydy/distance
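# Editorial note: deriv2x/deriv2y use the standard second-derivative stencils
# (1, -2, 1) in the interior and (2, -5, 4, -1) at the first edge, but divide
# by `distance` rather than distance**2; a true second derivative would use
# h**2 in the denominator (left as in the original; main() only exercises the
# first-derivative routines for the norm study).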
#second derivative of a mixed derivative
def mixed_deriv(xcoords, ycoords, vel):
distx = xcoords[1][0] - xcoords[0][0]
disty = ycoords[0][1] - ycoords[0][0]
mixed = np.zeros((100,100))
veldx = derivx(vel, xcoords)
veldy = derivy(veldx, xcoords) #takes deriv of vel with respect to x and derives that in the y direction
for i in range(len(vel)):
for x in range(len(vel)):
if 0 < i < len(vel) - 1 and 0 < x < len(vel) - 1:
mixed[i][x] = ((vel[i+1][x+1]) - (vel[i+1][x-1]) - (vel[i-1][x+1]) + (vel[i-1][x-1]))/(4*distx*disty)
#if on edges derives with respect to x first
elif i == 0 or i == len(vel) - 1 or x == 0 or x == len(vel) - 1:
mixed[i][x]=veldy[i][x]
return mixed
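# Editorial note: the interior formula is the standard central mixed-derivative
# stencil d2f/dxdy ~= (f[i+1,x+1] - f[i+1,x-1] - f[i-1,x+1] + f[i-1,x-1]) / (4*hx*hy);
# on the boundary it falls back to differentiating d/dx(vel) in the y direction.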
#create hessian matrix for each point
def hess(xcoords, ycoords, vel):
veldx = deriv2x(vel, xcoords) #retrieves the second derivatives of the velocity in the x direction
veldy = deriv2y(vel, xcoords) #retrieves the second derivatives of the velocity in the y direction
mixed = mixed_deriv(xcoords, ycoords, vel) #retrieves the second mixed derivatives of the velocity
hessian = np.zeros((2,2))
allhessian = [[[] for j in range(100)] for i in range(100)]
for j in range(len(veldx)):
for k in range(len(veldx)):
for i in range(len(hessian)):
for x in range(len(hessian)):
if i == 0 and x == 1:
hessian[i,x] = mixed[j,k]
hessian[i+1][x-1] = mixed[j,k]
elif x == 0 and i == 0:
hessian[i,x] = veldx[j,k]
elif x == 1 and i == 1:
hessian[i,x] = veldy[j,k]
            allhessian[j][k] = hessian.copy()  # copy so each grid point keeps its own matrix
allhessian = np.array(allhessian)
return allhessian
#find determinant
def determinant(allhessian):
deters = np.zeros((100,100))
for j in range(len(allhessian)):
for k in range(len(allhessian)):
x = allhessian[j,k]
deters[j,k] = (x[0,0]*x[1,1]) - (x[1,0]*x[0,1])
return deters
#find magnitude
def magnitude(velx,vely, xcoords):
mag = np.zeros((100,100))
yderiv = derivy(vely, xcoords)
xderiv = derivx(velx, xcoords)
for i in range(len(xderiv)):
for x in range(len(xderiv)):
mag[i][x] = (((yderiv[i,x]**2) + (xderiv[i,x]**2))**.5)
return mag
#finds extrema and saddlepoints
def extrema(allhessian, velx, vely, xcoords):
deters = determinant(allhessian)
extrem = np.zeros((100,100))
mag = magnitude(velx, vely, xcoords)
for j in range(len(extrem)):
for k in range(len(extrem)):
if mag[j][k] == 0:
if deters[j,k] < 0:
extrem[j, k] = -1
elif deters[j,k] == 0:
extrem[j,k] = 0
else:
x = allhessian[j,k]
                    if deters[j,k] > 0 and x[0,0] > 0:
                        extrem[j, k] = -2
                    elif deters[j,k] > 0 and x[0,0] < 0:
extrem[j, k] = 2
return extrem
#creates jacobia matrix for each point
def jacobian(xcoords,velx, vely):
xx = derivx(velx, xcoords)
xy = derivy(velx, xcoords)
yx = derivx(vely, xcoords)
yy = derivy(vely, xcoords)
jacob = np.zeros ((2,2))
alljacob = [[[] for j in range(100)] for i in range(100)]
for j in range(len(alljacob)):
for k in range(len(alljacob)):
for i in range(len(jacob)):
for c in range(len(jacob)):
                    if c == 0 and i == 0:
                        jacob[i][c] = xx[j][k]
                    elif c == 1 and i == 0:
                        jacob[i][c] = xy[j][k]
                    elif c == 0 and i == 1:
                        jacob[i][c] = yx[j][k]
                    elif c == 1 and i == 1:
                        jacob[i][c] = yy[j][k]
            alljacob[j][k] = jacob.copy()  # copy so each grid point keeps its own matrix
alljacob = np.array(alljacob)
return alljacob
#obtains eigenvalues for all points' jacobian matrices and then checks the extrema
def evals(alljacob):
eigen = [[[] for j in range(100)] for i in range(100)]
extrema = np.zeros((100,100))
for j in range(len(alljacob)):
for k in range(len(alljacob)):
x = alljacob[j,k]
eigen[j][k] = LA.eigvalsh(x)
y = eigen [j][k]
if y[0]>0 and y[1]>0:
extrema[j,k] = 2
elif y[0]<0 and y[1]<0:
extrema[j,k] = -2
elif y[0]*y[1]<0:
extrema[j,k] = 3
return extrema
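# Editorial note: the classification follows the second-derivative test on the
# eigenvalues: both positive -> 2 (minimum), both negative -> -2 (maximum),
# opposite signs -> 3 (saddle point). np.linalg.eigvalsh assumes a symmetric
# (Hermitian) matrix, which holds for a Hessian but not for a general Jacobian,
# so eigvals may be the safer choice here (left as in the original).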
#main function
def main():
zvals1 = np.loadtxt("Grid1.csv", dtype ='float', delimiter = ',')
zvals2 = np.loadtxt("Grid2.csv", dtype ='float', delimiter = ',')
xyvals1 = np.loadtxt("xy1.csv", dtype ='float', delimiter = ',')
xyvals2 = np.loadtxt("xy2.csv", dtype ='float', delimiter = ',')
dx1vals = np.loadtxt("Dx1.csv", dtype ='float', delimiter = ',')
dy1vals = np.loadtxt("Dy1.csv", dtype ='float', delimiter = ',')
dx2vals = np.loadtxt("Dx2.csv", dtype ='float', delimiter = ',')
dy2vals = np.loadtxt("Dy2.csv", dtype ='float', delimiter = ',')
#norms grid1
x = np.meshgrid(xyvals1, xyvals1)[0]
y = np.meshgrid(xyvals1, xyvals1)[1]
normnewfilex = normFile1(y, dx1vals, derivx(zvals1, x))
normnewfiley = normFile1(y, dy1vals, derivy(zvals1, x))
#norms grid2
x1 = np.meshgrid(xyvals2, xyvals2)[0]
y1 = np.meshgrid(xyvals2, xyvals2)[1]
normnewfilex1 = normFile2(y1, dx2vals, derivx(zvals2, x1))
normnewfiley1 = normFile2(y1, dy2vals, derivy(zvals2, x1))
#graphs of norms
#grid1 norms
'''
plt.figure()
plt.scatter(x, y, c = normnewfilex, marker= 'o',edgecolor='none')
plt.colorbar()
plt.show()
plt.figure()
plt.scatter(x, y, c = normnewfiley, marker= 'o',edgecolor='none')
plt.colorbar()
plt.show()
'''
#grid2 norms
'''
plt.figure()
plt.scatter(x1, y1, c = normnewfilex1, marker= 'o',edgecolor='none')
plt.colorbar()
plt.show()
plt.figure()
plt.scatter(x1, y1, c = normnewfiley1, marker= 'o',edgecolor='none')
plt.colorbar()
plt.show()
'''
#dx error norms as a function of h
dxnorm = log(normnewfilex/normnewfilex1)/log((xyvals1[1]-xyvals1[0])/(xyvals2[1]-xyvals2[0]))
print(abs(dxnorm))
#dy error norms as a function of h
dynorm = log(normnewfiley/normnewfiley1)/log((xyvals1[1]-xyvals1[0])/(xyvals2[1]-xyvals2[0]))
print(abs(dynorm))
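    # Editorial note: assuming the error behaves like C * h**p, the observed
    # order of accuracy is p = log(E1/E2) / log(h1/h2); e.g. halving h and
    # seeing the error drop by about 4x gives p ~= 2 (second-order accuracy).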
#prints extrema for file1 using jacobian method
print(evals(jacobian(x, zvals1, zvals1)))
#prints extrema for file1 using hessian method and second derivatives (which are missing at the moment)
#(extrema(hess(x, y, zvals1), zvals1, zvals1, x))
main()
| gpl-3.0 |
chapman-phys227-2016s/cw-3-classwork-team | sequence_limits.py | 1 | 3024 | #! /usr/bin/env python
"""
File: sequence_limits.py
Copyright (c) 2016 Austin Ayers
License: MIT
Course: PHYS227
Assignment: A. 1
Date: Feb 11, 2016
Email: ayers111@mail.chapman.edu
Name: Austin Ayers
Description: Determines the limit of a sequence
"""
import numpy as np
import matplotlib.pyplot as plt
def seq_a(n):
"""
Returns an element in a sequence given the n value
"""
return ((7.0+(1.0/(float(n)+1.0)))/(3.0-(1.0/(float(n)+1.0)**2)))
def seq_c(n):
return np.sin(2.0**(-1 *float(n)))/(2.0**(-1 *float(n)))
def part_a():
"""
Writes out the sequence for N = 100, and finds the value as n -> inf
"""
sequence = []
for i in range(0,100,2):
print i
print seq_a(i)
sequence.append(seq_a(i))
print "\n\n"
print "The series converges to: 7/3 or 2.3333..., and a_N was: " + str(seq_a(100)) + " and the difference was: " + str((seq_a(100)-2.33333333333333333))
return sequence
def limit(seq):
"""
    Determines whether a sequence has a limit and returns it; if the sequence has no limit, it outputs None
"""
cond = True
for i in range(1,len(seq)-1):
if not (abs(seq[i]) - abs(seq[i+1]) < abs(seq[i-1]) - abs(seq[i])):
print "None"
cond = False
break
if(cond):
print "The limit exists (to this algorithm's knowledge)"
if(seq[-1] - seq[-2] < 0.01):
return seq[-1]
def part_b():
"""
tests limit(seq) if it works for the sequence in part a
"""
seq_a = part_a()
print "The limit is roughly: " + str(limit(seq_a))
def part_c():
"""
tests limit(seq) if it works for the sequence in part c
"""
sequence = []
for i in range(500):
sequence.append(seq_c(i))
print "The limit is roughly: " + str(limit(sequence))
def sin_x(x):
return np.sin(x)
def D(f, x, N):
"""
takes a function f(x), a value x, and the number N and returns the sequence for 0,N
"""
sequence = []
for i in range(N):
sequence.append((f(float(x)+(2.0**(-1 *float(i))))-f(x))/(2.0**(-1 *float(i))))
return sequence
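# Editorial note: D builds the forward-difference sequence
# (f(x + 2**-n) - f(x)) / 2**-n, which should approach f'(x) as n grows;
# for f = sin and x = 0 the analytic limit is cos(0) = 1.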
def part_d():
seq_d = D(sin_x, 0, 80)
print str(limit(seq_d))
print "notice this fails because the function is oscillatory in behavior (sin(x))"
plt.plot(range(80), seq_d, 'go')
    plt.show()
def part_e():
seq_e = D(sin_x, np.pi, 80)
print str(limit(seq_e))
plt.plot(range(80), seq_e, 'go')
    plt.show()
def part_f():
print "the computations for x = pi go wrong for large N because sin(pi) = 0 and 2 ** (-n) approaches 0 as well, so the numerator and denominator both go to 0 and that usually leads to problems."
def run():
"""
Runs the entire program with parts
"""
print "Part (a): "
part_a()
print
print "Part (b): "
part_b()
print
print "Part (c): "
print
part_c()
print "Part (d): "
print
part_d()
print "Part (e): "
print
part_e()
print "Part (f): "
print
part_f()
print "Finished" | mit |
flaviovdf/aflux | aflux/dataio.py | 1 | 3090 | #-*- coding: utf8
from __future__ import division, print_function
from collections import defaultdict
from collections import OrderedDict
import numpy as np
import pandas as pd
def save_model(out_fpath, model):
store = pd.HDFStore(out_fpath, 'w')
for model_key in model:
model_val = model[model_key]
if type(model_val) == np.ndarray:
store[model_key] = pd.DataFrame(model_val)
else:
store[model_key] = pd.DataFrame(model_val.items(), \
columns=['Name', 'Id'])
store.close()
def initialize_trace(trace_fpath, num_topics, burn_in):
count_zh_dict = defaultdict(int)
count_sz_dict = defaultdict(int)
count_dz_dict = defaultdict(int)
count_z_dict = defaultdict(int)
count_h_dict = defaultdict(int)
hyper2id = OrderedDict()
source2id = OrderedDict()
dest2id = OrderedDict()
Trace = []
with open(trace_fpath, 'r') as trace_file:
for i, line in enumerate(trace_file):
hyper_str, source_str, dest_str, c = line.strip().split('\t')
c = int(c)
for _ in xrange(c):
if hyper_str not in hyper2id:
hyper2id[hyper_str] = len(hyper2id)
if source_str not in source2id:
source2id[source_str] = len(source2id)
if dest_str not in dest2id:
dest2id[dest_str] = len(dest2id)
h = hyper2id[hyper_str]
s = source2id[source_str]
d = dest2id[dest_str]
z = np.random.randint(num_topics)
count_zh_dict[z, h] += 1
count_sz_dict[s, z] += 1
count_dz_dict[d, z] += 1
count_z_dict[z] += 1
count_h_dict[h] += 1
Trace.append([h, s, d, z])
Trace = np.asarray(Trace, dtype='i4', order='C')
nh = len(hyper2id)
ns = len(source2id)
nd = len(dest2id)
nz = num_topics
Count_zh = np.zeros(shape=(nz, nh), dtype='i4')
Count_sz = np.zeros(shape=(ns, nz), dtype='i4')
Count_dz = np.zeros(shape=(nd, nz), dtype='i4')
count_h = np.zeros(shape=(nh,), dtype='i4')
count_z = np.zeros(shape=(nz,), dtype='i4')
for z in xrange(Count_zh.shape[0]):
count_z[z] = count_z_dict[z]
for h in xrange(Count_zh.shape[1]):
count_h[h] = count_h_dict[h]
Count_zh[z, h] = count_zh_dict[z, h]
for s in xrange(Count_sz.shape[0]):
Count_sz[s, z] = count_sz_dict[s, z]
for d in xrange(Count_dz.shape[0]):
Count_dz[d, z] = count_dz_dict[d, z]
prob_topics_aux = np.zeros(nz, dtype='f8')
Theta_zh = np.zeros(shape=(nz, nh), dtype='f8')
Psi_sz = np.zeros(shape=(ns, nz), dtype='f8')
Psi_dz = np.zeros(shape=(nd, nz), dtype='f8')
return Trace, Count_zh, Count_sz, Count_dz, count_h, count_z, \
prob_topics_aux, Theta_zh, Psi_sz, Psi_dz, hyper2id, source2id, dest2id
| bsd-3-clause |
ronalcc/zipline | zipline/sources/simulated.py | 18 | 5422 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import six
import numpy as np
from datetime import timedelta
import pandas as pd
from zipline.sources.data_source import DataSource
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.gens.utils import hash_args
from zipline.finance.trading import TradingEnvironment
class RandomWalkSource(DataSource):
"""RandomWalkSource that emits events with prices that follow a
random walk. Will generate valid datetimes that match market hours
of the supplied calendar and can generate emit events with
user-defined frequencies (e.g. minutely).
"""
VALID_FREQS = frozenset(('daily', 'minute'))
def __init__(self, start_prices=None, freq='minute', start=None,
end=None, drift=0.1, sd=0.1, calendar=calendar_nyse):
"""
:Arguments:
start_prices : dict
sid -> starting price.
Default: {0: 100, 1: 500}
freq : str <default='minute'>
Emits events according to freq.
Can be 'daily' or 'minute'
start : datetime <default=start of calendar>
Start dt to emit events.
end : datetime <default=end of calendar>
            End dt up to which to emit events.
drift: float <default=0.1>
Constant drift of the price series.
sd: float <default=0.1>
Standard deviation of the price series.
calendar : calendar object <default: NYSE>
Calendar to use.
See zipline.utils for different choices.
:Example:
# Assumes you have instantiated your Algorithm
# as myalgo.
myalgo = MyAlgo()
source = RandomWalkSource()
myalgo.run(source)
"""
# Hash_value for downstream sorting.
self.arg_string = hash_args(start_prices, freq, start, end,
calendar.__name__)
if freq not in self.VALID_FREQS:
raise ValueError('%s not in %s' % (freq, self.VALID_FREQS))
self.freq = freq
if start_prices is None:
self.start_prices = {0: 100,
1: 500}
else:
self.start_prices = start_prices
self.calendar = calendar
if start is None:
self.start = calendar.start
else:
self.start = start
if end is None:
self.end = calendar.end_base
else:
self.end = end
self.drift = drift
self.sd = sd
self.sids = self.start_prices.keys()
TradingEnvironment.instance().update_asset_finder(
identifiers=self.sids
)
self.open_and_closes = \
calendar.open_and_closes[self.start:self.end]
self._raw_data = None
@property
def instance_hash(self):
return self.arg_string
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
'open_price': (float, 'open_price'),
'high': (float, 'high'),
'low': (float, 'low'),
}
def _gen_next_step(self, x):
x += np.random.randn() * self.sd + self.drift
return max(x, 0.1)
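    # Editorial note: each step updates the price by np.random.randn() * sd + drift,
    # i.e. a Gaussian random walk with constant drift, floored at 0.1 so the
    # simulated prices stay positive.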
def _gen_events(self, cur_prices, current_dt):
for sid, price in six.iteritems(cur_prices):
cur_prices[sid] = self._gen_next_step(cur_prices[sid])
event = {
'dt': current_dt,
'sid': sid,
'price': cur_prices[sid],
'volume': np.random.randint(1e5, 1e6),
'open_price': cur_prices[sid],
'high': cur_prices[sid] + .1,
'low': cur_prices[sid] - .1,
}
yield event
def raw_data_gen(self):
cur_prices = copy(self.start_prices)
for _, (open_dt, close_dt) in self.open_and_closes.iterrows():
current_dt = copy(open_dt)
if self.freq == 'minute':
# Emit minutely trade signals from open to close
while current_dt <= close_dt:
for event in self._gen_events(cur_prices, current_dt):
yield event
current_dt += timedelta(minutes=1)
elif self.freq == 'daily':
# Emit one signal per day at close
for event in self._gen_events(
cur_prices, pd.tslib.normalize_date(close_dt)):
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
| apache-2.0 |
plotly/python-api | packages/python/plotly/_plotly_utils/tests/validators/test_pandas_series_input.py | 1 | 4531 | import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from _plotly_utils.basevalidators import (
NumberValidator,
IntegerValidator,
DataArrayValidator,
ColorValidator,
)
@pytest.fixture
def data_array_validator(request):
return DataArrayValidator("prop", "parent")
@pytest.fixture
def integer_validator(request):
return IntegerValidator("prop", "parent", array_ok=True)
@pytest.fixture
def number_validator(request):
return NumberValidator("prop", "parent", array_ok=True)
@pytest.fixture
def color_validator(request):
return ColorValidator("prop", "parent", array_ok=True, colorscale_path="")
@pytest.fixture(
params=[
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
]
)
def numeric_dtype(request):
return request.param
@pytest.fixture(params=[pd.Series, pd.Index])
def pandas_type(request):
return request.param
@pytest.fixture
def numeric_pandas(request, pandas_type, numeric_dtype):
return pandas_type(np.arange(10), dtype=numeric_dtype)
@pytest.fixture
def color_object_pandas(request, pandas_type):
return pandas_type(["blue", "green", "red"] * 3, dtype="object")
@pytest.fixture
def color_categorical_pandas(request, pandas_type):
return pandas_type(pd.Categorical(["blue", "green", "red"] * 3))
@pytest.fixture
def dates_array(request):
return np.array(
[
datetime(year=2013, month=10, day=10),
datetime(year=2013, month=11, day=10),
datetime(year=2013, month=12, day=10),
datetime(year=2014, month=1, day=10),
datetime(year=2014, month=2, day=10),
]
)
@pytest.fixture
def datetime_pandas(request, pandas_type, dates_array):
return pandas_type(dates_array)
def test_numeric_validator_numeric_pandas(number_validator, numeric_pandas):
res = number_validator.validate_coerce(numeric_pandas)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == numeric_pandas.dtype
# Check values
np.testing.assert_array_equal(res, numeric_pandas)
def test_integer_validator_numeric_pandas(integer_validator, numeric_pandas):
res = integer_validator.validate_coerce(numeric_pandas)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
if numeric_pandas.dtype.kind in ("u", "i"):
# Integer and unsigned integer dtype unchanged
assert res.dtype == numeric_pandas.dtype
else:
# Float datatypes converted to default integer type of int32
assert res.dtype == "int32"
# Check values
np.testing.assert_array_equal(res, numeric_pandas)
def test_data_array_validator(data_array_validator, numeric_pandas):
res = data_array_validator.validate_coerce(numeric_pandas)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == numeric_pandas.dtype
# Check values
np.testing.assert_array_equal(res, numeric_pandas)
def test_color_validator_numeric(color_validator, numeric_pandas):
res = color_validator.validate_coerce(numeric_pandas)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == numeric_pandas.dtype
# Check values
np.testing.assert_array_equal(res, numeric_pandas)
def test_color_validator_object(color_validator, color_object_pandas):
res = color_validator.validate_coerce(color_object_pandas)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == "object"
# Check values
np.testing.assert_array_equal(res, color_object_pandas)
def test_color_validator_categorical(color_validator, color_categorical_pandas):
res = color_validator.validate_coerce(color_categorical_pandas)
# Check type
assert color_categorical_pandas.dtype == "category"
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == "object"
# Check values
np.testing.assert_array_equal(res, np.array(color_categorical_pandas))
def test_data_array_validator_dates(data_array_validator, datetime_pandas, dates_array):
res = data_array_validator.validate_coerce(datetime_pandas)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == "object"
# Check values
np.testing.assert_array_equal(res, dates_array)
| mit |
huongttlan/statsmodels | statsmodels/graphics/dotplots.py | 31 | 18190 | import numpy as np
from statsmodels.compat import range
from . import utils
def dot_plot(points, intervals=None, lines=None, sections=None,
styles=None, marker_props=None, line_props=None,
split_names=None, section_order=None, line_order=None,
stacked=False, styles_order=None, striped=False,
horizontal=True, show_names="both",
fmt_left_name=None, fmt_right_name=None,
show_section_titles=None, ax=None):
"""
Produce a dotplot similar in style to those in Cleveland's
"Visualizing Data" book. These are also known as "forest plots".
Parameters
----------
points : array_like
The quantitative values to be plotted as markers.
intervals : array_like
The intervals to be plotted around the points. The elements
of `intervals` are either scalars or sequences of length 2. A
scalar indicates the half width of a symmetric interval. A
sequence of length 2 contains the left and right half-widths
(respectively) of a nonsymmetric interval. If None, no
intervals are drawn.
lines : array_like
A grouping variable indicating which points/intervals are
drawn on a common line. If None, each point/interval appears
on its own line.
sections : array_like
A grouping variable indicating which lines are grouped into
sections. If None, everything is drawn in a single section.
styles : array_like
A grouping label defining the plotting style of the markers
and intervals.
marker_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting markers. Useful keyword
arguments are "color", "marker", and "ms" (marker size).
line_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting interval lines. Useful
keyword arguments are "color", "linestyle", "solid_capstyle",
and "linewidth".
split_names : string
If not None, this is used to split the values of `lines` into
substrings that are drawn in the left and right margins,
respectively. If None, the values of `lines` are drawn in the
left margin.
section_order : array_like
The section labels in the order in which they appear in the
dotplot.
line_order : array_like
The line labels in the order in which they appear in the
dotplot.
stacked : boolean
If True, when multiple points or intervals are drawn on the
same line, they are offset from each other.
styles_order : array_like
If stacked=True, this is the order in which the point styles
on a given line are drawn from top to bottom (if horizontal
        is True) or from left to right (if horizontal is False). If
None (default), the order is lexical.
striped : boolean
If True, every other line is enclosed in a shaded box.
horizontal : boolean
If True (default), the lines are drawn horizontally, otherwise
they are drawn vertically.
show_names : string
Determines whether labels (names) are shown in the left and/or
        right margins (bottom/top margins if `horizontal` is False).
        If `both`, labels are drawn in both margins; if `left`, labels
        are drawn in the left or bottom margin; if `right`, labels are
        drawn in the right or top margin.
fmt_left_name : function
The left/top margin names are passed through this function
before drawing on the plot.
fmt_right_name : function
        The right/bottom margin names are passed through this function
before drawing on the plot.
show_section_titles : bool or None
If None, section titles are drawn only if there is more than
one section. If False/True, section titles are never/always
drawn, respectively.
ax : matplotlib.axes
The axes on which the dotplot is drawn. If None, a new axes
is created.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Notes
-----
`points`, `intervals`, `lines`, `sections`, `styles` must all have
the same length whenever present.
Examples
--------
This is a simple dotplot with one point per line:
>>> dot_plot(points=point_values)
This dotplot has labels on the lines (if elements in
`label_values` are repeated, the corresponding points appear on
the same line):
>>> dot_plot(points=point_values, lines=label_values)
References
----------
* Cleveland, William S. (1993). "Visualizing Data". Hobart
Press.
* Jacoby, William G. (2006) "The Dot Plot: A Graphical Display
for Labeled Quantitative Values." The Political Methodologist
14(1): 6-14.
"""
import matplotlib.transforms as transforms
fig, ax = utils.create_mpl_ax(ax)
# Convert to numpy arrays if that is not what we are given.
points = np.asarray(points)
asarray_or_none = lambda x : None if x is None else np.asarray(x)
intervals = asarray_or_none(intervals)
lines = asarray_or_none(lines)
sections = asarray_or_none(sections)
styles = asarray_or_none(styles)
# Total number of points
npoint = len(points)
# Set default line values if needed
if lines is None:
lines = np.arange(npoint)
# Set default section values if needed
if sections is None:
sections = np.zeros(npoint)
# Set default style values if needed
if styles is None:
styles = np.zeros(npoint)
# The vertical space (in inches) for a section title
section_title_space = 0.5
# The number of sections
nsect = len(set(sections))
if section_order is not None:
nsect = len(set(section_order))
# The number of section titles
if show_section_titles == False:
draw_section_titles = False
nsect_title = 0
elif show_section_titles == True:
draw_section_titles = True
nsect_title = nsect
else:
draw_section_titles = nsect > 1
nsect_title = nsect if nsect > 1 else 0
# The total vertical space devoted to section titles.
section_space_total = section_title_space * nsect_title
# Add a bit of room so that points that fall at the axis limits
# are not cut in half.
ax.set_xmargin(0.02)
ax.set_ymargin(0.02)
if section_order is None:
lines0 = list(set(sections))
lines0.sort()
else:
lines0 = section_order
if line_order is None:
lines1 = list(set(lines))
lines1.sort()
else:
lines1 = line_order
# A map from (section,line) codes to index positions.
lines_map = {}
for i in range(npoint):
if section_order is not None and sections[i] not in section_order:
continue
if line_order is not None and lines[i] not in line_order:
continue
ky = (sections[i], lines[i])
if ky not in lines_map:
lines_map[ky] = []
lines_map[ky].append(i)
# Get the size of the axes on the parent figure in inches
bbox = ax.get_window_extent().transformed(
fig.dpi_scale_trans.inverted())
awidth, aheight = bbox.width, bbox.height
# The number of lines in the plot.
nrows = len(lines_map)
# The positions of the lowest and highest guideline in axes
# coordinates (for horizontal dotplots), or the leftmost and
# rightmost guidelines (for vertical dotplots).
bottom, top = 0, 1
if horizontal:
# x coordinate is data, y coordinate is axes
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
else:
# x coordinate is axes, y coordinate is data
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
# Space used for a section title, in axes coordinates
title_space_axes = section_title_space / aheight
# Space between lines
if horizontal:
dpos = (top - bottom - nsect_title*title_space_axes) /\
float(nrows)
else:
dpos = (top - bottom) / float(nrows)
# Determine the spacing for stacked points
if styles_order is not None:
style_codes = styles_order
else:
style_codes = list(set(styles))
style_codes.sort()
# Order is top to bottom for horizontal plots, so need to
# flip.
if horizontal:
style_codes = style_codes[::-1]
# nval is the maximum number of points on one line.
nval = len(style_codes)
if nval > 1:
stackd = dpos / (2.5*(float(nval)-1))
else:
stackd = 0.
# Map from style code to its integer position
#style_codes_map = {x: style_codes.index(x) for x in style_codes}
# python 2.6 compat version:
style_codes_map = dict((x, style_codes.index(x)) for x in style_codes)
# Setup default marker styles
colors = ["r", "g", "b", "y", "k", "purple", "orange"]
if marker_props is None:
#marker_props = {x: {} for x in style_codes}
# python 2.6 compat version:
marker_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in marker_props[sc]:
marker_props[sc]["color"] = colors[j % len(colors)]
if "marker" not in marker_props[sc]:
marker_props[sc]["marker"] = "o"
if "ms" not in marker_props[sc]:
marker_props[sc]["ms"] = 10 if stackd == 0 else 6
# Setup default line styles
if line_props is None:
#line_props = {x: {} for x in style_codes}
# python 2.6 compat version:
line_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in line_props[sc]:
line_props[sc]["color"] = "grey"
if "linewidth" not in line_props[sc]:
line_props[sc]["linewidth"] = 2 if stackd > 0 else 8
if horizontal:
# The vertical position of the first line.
pos = top - dpos/2 if nsect == 1 else top
else:
# The horizontal position of the first line.
pos = bottom + dpos/2
# Points that have already been labeled
labeled = set()
# Positions of the y axis grid lines
ticks = []
# Loop through the sections
for k0 in lines0:
# Draw a section title
if draw_section_titles:
if horizontal:
y0 = pos + dpos/2 if k0 == lines0[0] else pos
ax.fill_between((0, 1), (y0,y0),
(pos-0.7*title_space_axes,
pos-0.7*title_space_axes),
color='darkgrey',
transform=ax.transAxes,
zorder=1)
txt = ax.text(0.5, pos - 0.35*title_space_axes, k0,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
txt.set_fontweight("bold")
pos -= title_space_axes
else:
m = len([k for k in lines_map if k[0] == k0])
ax.fill_between((pos-dpos/2+0.01,
pos+(m-1)*dpos+dpos/2-0.01),
(1.01,1.01), (1.06,1.06),
color='darkgrey',
transform=ax.transAxes,
zorder=1, clip_on=False)
txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0,
horizontalalignment='center',
verticalalignment='bottom',
transform=ax.transAxes)
txt.set_fontweight("bold")
jrow = 0
for k1 in lines1:
# No data to plot
if (k0, k1) not in lines_map:
continue
# Draw the guideline
if horizontal:
ax.axhline(pos, color='grey')
else:
ax.axvline(pos, color='grey')
# Set up the labels
if split_names is not None:
us = k1.split(split_names)
if len(us) >= 2:
left_label, right_label = us[0], us[1]
else:
left_label, right_label = k1, None
else:
left_label, right_label = k1, None
if fmt_left_name is not None:
left_label = fmt_left_name(left_label)
if fmt_right_name is not None:
right_label = fmt_right_name(right_label)
# Draw the stripe
if striped and jrow % 2 == 0:
if horizontal:
ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2),
(pos+dpos/2, pos+dpos/2),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
else:
ax.fill_between((pos-dpos/2, pos+dpos/2),
(0, 0), (1, 1),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
jrow += 1
# Draw the left margin label
if show_names.lower() in ("left", "both"):
if horizontal:
ax.text(-0.1/awidth, pos, left_label,
horizontalalignment="right",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, -0.1/aheight, left_label,
horizontalalignment="center",
verticalalignment='top',
transform=ax.transAxes,
family='monospace')
# Draw the right margin label
if show_names.lower() in ("right", "both"):
if right_label is not None:
if horizontal:
ax.text(1 + 0.1/awidth, pos, right_label,
horizontalalignment="left",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, 1 + 0.1/aheight, right_label,
horizontalalignment="center",
verticalalignment='bottom',
transform=ax.transAxes,
family='monospace')
# Save the vertical position so that we can place the
# tick marks
ticks.append(pos)
# Loop over the points in one line
for ji,jp in enumerate(lines_map[(k0,k1)]):
# Calculate the vertical offset
yo = 0
if stacked:
yo = -dpos/5 + style_codes_map[styles[jp]]*stackd
pt = points[jp]
# Plot the interval
if intervals is not None:
# Symmetric interval
if np.isscalar(intervals[jp]):
lcb, ucb = pt - intervals[jp],\
pt + intervals[jp]
# Nonsymmetric interval
else:
lcb, ucb = pt - intervals[jp][0],\
pt + intervals[jp][1]
# Draw the interval
if horizontal:
ax.plot([lcb, ucb], [pos+yo, pos+yo], '-',
transform=trans,
**line_props[styles[jp]])
else:
ax.plot([pos+yo, pos+yo], [lcb, ucb], '-',
transform=trans,
**line_props[styles[jp]])
# Plot the point
sl = styles[jp]
sll = sl if sl not in labeled else None
labeled.add(sl)
if horizontal:
ax.plot([pt,], [pos+yo,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
else:
ax.plot([pos+yo,], [pt,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
if horizontal:
pos -= dpos
else:
pos += dpos
# Set up the axis
if horizontal:
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("none")
ax.set_yticklabels([])
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.1/aheight))
ax.set_ylim(0, 1)
ax.yaxis.set_ticks(ticks)
ax.autoscale_view(scaley=False, tight=True)
else:
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("none")
ax.set_xticklabels([])
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('axes', -0.1/awidth))
ax.set_xlim(0, 1)
ax.xaxis.set_ticks(ticks)
ax.autoscale_view(scalex=False, tight=True)
return fig
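# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module. It only calls the
# `dot_plot` function defined above; the point, interval and label values are
# invented for illustration. Because of the relative import of `utils` above,
# run it as `python -m statsmodels.graphics.dotplots` rather than directly.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    point_values = np.array([1.2, 0.7, 2.3, 1.9, 0.4])
    interval_values = np.array([0.3, 0.2, 0.5, 0.4, 0.1])
    label_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
    fig = dot_plot(points=point_values, intervals=interval_values,
                   lines=label_values, striped=True)
    plt.show()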
| bsd-3-clause |
JingJunYin/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
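# Note: these tests rely on TF1-style graphs, sessions and queue runners,
# which newer TensorFlow releases only expose through tf.compat.v1.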
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
lizardsystem/lizard-damage | lizard_damage/results.py | 1 | 10390 | """Process results for a DamageEvent.
The idea is that during a calculation a ResultCollector object is kept
around, and generated results (like land use images for a given tile) can
be "thrown to" it."""
import glob
import os
import shutil
import subprocess
import tempfile
import zipfile
from PIL import Image
from pyproj import Proj
import matplotlib as mpl
import numpy as np
ZIP_FILENAME = 'result.zip'
RD = str(
"+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.999908"
" +x_0=155000 +y_0=463000 +ellps=bessel +units=m +towgs84=565.2369,"
"50.0087,465.658,-0.406857330322398,0.350732676542563,-1.8703473836068,"
"4.0812 +no_defs <>"
)
WGS84 = str('+proj=latlong +datum=WGS84')
rd_proj = Proj(RD)
wgs84_proj = Proj(WGS84)
CDICT_HEIGHT = {
'red': ((0.0, 51. / 256, 51. / 256),
(0.5, 237. / 256, 237. / 256),
(1.0, 83. / 256, 83. / 256)),
'green': ((0.0, 114. / 256, 114. / 256),
(0.5, 245. / 256, 245. / 256),
(1.0, 83. / 256, 83. / 256)),
'blue': ((0.0, 54. / 256, 54. / 256),
(0.5, 170. / 256, 170. / 256),
(1.0, 83. / 256, 83. / 256)),
}
CDICT_WATER_DEPTH = {
'red': ((0.0, 170. / 256, 170. / 256),
(0.5, 65. / 256, 65. / 256),
(1.0, 4. / 256, 4. / 256)),
'green': ((0.0, 200. / 256, 200. / 256),
(0.5, 120. / 256, 120. / 256),
(1.0, 65. / 256, 65. / 256)),
'blue': ((0.0, 255. / 256, 255. / 256),
(0.5, 221. / 256, 221. / 256),
(1.0, 176. / 256, 176. / 256)),
}
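# Each cdict above follows matplotlib's LinearSegmentedColormap convention:
# per channel a sequence of (x, value_below, value_above) tuples, with x
# running from 0.0 to 1.0.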
class ResultCollector(object):
def __init__(self, workdir, all_leaves, logger):
"""Start a new ResultCollector.
Workdir is a damage event's workdir. All result files are placed
in that directory, or subdirectories of it.
all_leaves is an iterable of (ahn_name, extent) tuples that
is mainly used to know what the entire extent is going to be
in advance.
All files are placed in the damage event's directory.
Results that are tracked:
- Files to be added to a result zipfile
- Landuse tiles
- Water depth tiles
- Height tiles
- Damage tiles.
The damage tiles are added as ASC's to the result zipfile.
All four types of tile are saved as images for showing using Google.
The damage tiles are somewhat special in that they will first be
saved, and need to have roads drawn in them afterwards.
"""
self.workdir = workdir
self.tempdir = os.path.join(self.workdir, 'tmp')
if not os.path.exists(self.tempdir):
os.makedirs(self.tempdir)
self.logger = logger
# We want to know all leaves in advance, so we can make images for
# the entire region, or sections of it, without having to let them
# correspond 1:1 to the tiles.
self.all_leaves = {
ahn_name: extent for (ahn_name, extent) in all_leaves
}
self.riskmap_data = []
# Create an empty zipfile, throw away the old one if needed.
self.zipfile = mk(self.workdir, ZIP_FILENAME)
if os.path.exists(self.zipfile):
os.remove(self.zipfile)
self.mins = {'depth': float("+inf"), 'height': float("+inf")}
self.maxes = {'depth': float("-inf"), 'height': float("-inf")}
def png_path(self, result_type, tile):
return mk(self.workdir, result_type, "{}.png".format(tile))
def save_ma(
self, tile, masked_array, result_type, ds_template=None,
repetition_time=None):
# self.save_ma_to_geoimage(tile, masked_array, result_type)
# ^^^ disable because google maps api no longer supports this,
# and because tmp takes excessive space because of this
# (uncompressed) storage.
if result_type == 'damage':
filename = self.save_ma_to_asc(
tile, masked_array, result_type, ds_template, repetition_time)
if repetition_time is not None:
# TODO (Reinout wants to know where this is used. The file is
# deleted after adding it to the zipfile, so....)
self.riskmap_data.append(
(tile, repetition_time, filename))
def save_ma_to_asc(
self, tile, masked_array, result_type, ds_template,
repetition_time):
from lizard_damage import calc
if repetition_time is not None:
filename = 'schade_{}_T{}.asc'.format(tile, repetition_time)
else:
filename = 'schade_{}.asc'.format(tile)
filename = os.path.join(self.tempdir, filename)
calc.write_result(
name=filename,
ma_result=masked_array,
ds_template=ds_template)
return filename
def save_csv_data_for_zipfile(self, zipname, csvdata):
from lizard_damage import calc
filename = calc.mkstemp_and_close()
calc.write_table(name=filename, **csvdata)
self.save_file_for_zipfile(filename, zipname, delete_after=True)
def save_file_for_zipfile(self, file_path, zipname, delete_after=False):
with zipfile.ZipFile(self.zipfile, 'a', zipfile.ZIP_DEFLATED) as myzip:
self.logger.info('zipping %s...' % zipname)
myzip.write(file_path, zipname)
if delete_after:
self.logger.info(
'removing %r (%s in arc)' % (file_path, zipname))
os.remove(file_path)
def build_damage_geotiff(self):
orig_dir = os.getcwd()
os.chdir(self.tempdir)
asc_files = glob.glob('*.asc')
if not asc_files:
self.logger.info(
"No asc files as input, not writing out a geotiff.")
for asc_file in asc_files:
tiff_file = asc_file.replace('.asc', '.tiff')
cmd = ("gdal_translate %s %s "
"-co compress=deflate -co tiled=yes "
"-ot float32 -a_srs EPSG:28992")
os.system(cmd % (asc_file, tiff_file))
self.save_file_for_zipfile(tiff_file, tiff_file)
file_with_tiff_filenames = tempfile.NamedTemporaryFile()
tiff_files = glob.glob('*.tiff')
for tiff_file in tiff_files:
file_with_tiff_filenames.write(tiff_file + "\n")
file_with_tiff_filenames.flush()
vrt_file = 'schade.vrt'
cmd = "gdalbuildvrt -input_file_list %s %s" % (
file_with_tiff_filenames.name, vrt_file)
self.logger.debug(cmd)
os.system(cmd)
file_with_tiff_filenames.close() # Deletes the temporary file
if os.path.exists(vrt_file):
self.save_file_for_zipfile(vrt_file, vrt_file)
os.chdir(orig_dir)
def finalize(self):
"""Make final version of the data:
- Warp all generated geoimages to WGS84.
"""
self.extents = {}
for tile in self.all_leaves:
for result_type in ('height', 'depth'):
tmp_filename = os.path.join(
self.tempdir, "{}.{}".format(tile, result_type))
if os.path.exists(tmp_filename):
masked_array = np.load(tmp_filename)
os.remove(tmp_filename)
normalize = mpl.colors.Normalize(
vmin=self.mins[result_type],
vmax=self.maxes[result_type])
if result_type == 'height':
cdict = CDICT_HEIGHT
elif result_type == 'depth':
cdict = CDICT_WATER_DEPTH
colormap = mpl.colors.LinearSegmentedColormap(
'something', cdict, N=1024)
rgba = colormap(normalize(masked_array), bytes=True)
if result_type == 'depth':
rgba[:, :, 3] = np.where(
np.greater(masked_array.filled(0), 0), 255, 0)
filename = self.png_path(result_type, tile)
Image.fromarray(rgba).save(filename, 'PNG')
write_extent_pgw(filename.replace('.png', '.pgw'),
self.all_leaves[tile])
for result_type in ('damage', 'landuse', 'height', 'depth'):
png = self.png_path(result_type, tile)
if os.path.exists(png):
result_extent = rd_to_wgs84(png)
self.extents[(tile, result_type)] = result_extent
def cleanup_tmp_dir(self):
shutil.rmtree(self.tempdir)
def all_images(self):
"""Generate path and extent of all created images. Path is relative
to the workdir. Only use after finalizing."""
for ((tile, result_type), extent) in self.extents.items():
png_path = self.png_path(result_type, tile)
if os.path.exists(png_path):
relative = png_path[len(self.workdir):]
yield (result_type, relative, extent)
def write_extent_pgw(name, extent):
"""write pgw file:
0.5
0.000
0.000
    -0.5
<x ul corner>
<y ul corner>
extent is a 4-tuple
"""
f = open(name, 'w')
f.write('0.5\n0.000\n0.000\n-0.5\n')
f.write('%f\n%f' % (min(extent[0], extent[2]), max(extent[1], extent[3])))
f.close()
def mk(*parts):
"""Combine parts using os.path.join, then make sure the directory
exists."""
path = os.path.join(*parts)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return path
def rd_to_wgs84(png):
from lizard_damage import models
# Step 1: warp using gdalwarp to lon/lat in .tif
# Warp png file, output is tif.
tif = png.replace('.png', '.tif')
subprocess.call([
'gdalwarp', png, tif,
'-t_srs', "+proj=latlong +datum=WGS84", '-s_srs', RD.strip()])
# Step 2: convert .tif back to .png
im = Image.open(tif)
im.save(png, 'PNG')
# Step 3: We can't save this WGS84 as a PGW (or at least, we don't).
# Remove the old PGW and return this extent.
result_extent = models.extent_from_geotiff(tif)
os.remove(png.replace('.png', '.pgw'))
# Step 4: remove TIF
os.remove(tif)
return result_extent
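if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module. It exercises only
    # the two small helpers above that need no GIS data (the module's
    # top-level imports of PIL, pyproj, matplotlib and numpy must still be
    # importable); the RD extent values are invented for illustration.
    import tempfile
    demo_dir = tempfile.mkdtemp()
    pgw_path = mk(demo_dir, 'demo', 'tile.pgw')
    write_extent_pgw(pgw_path, (210000.0, 520000.0, 211000.0, 521000.0))
    print(open(pgw_path).read())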
| gpl-3.0 |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/container.py | 11 | 3370 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
"""
def __repr__(self):
return "<Container object of %d artists>" % (len(self))
def __new__(cls, *kl, **kwargs):
return tuple.__new__(cls, kl[0])
def __init__(self, kl, label=None):
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self._remove_method = None
self.set_label(label)
def set_remove_method(self, f):
self._remove_method = f
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method(self)
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
return d
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in list(six.iteritems(self._propobservers)):
func(self)
def get_children(self):
return list(cbook.flatten(self))
class BarContainer(Container):
def __init__(self, patches, errorbar=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
Container.__init__(self, patches, **kwargs)
class ErrorbarContainer(Container):
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
Container.__init__(self, lines, **kwargs)
class StemContainer(Container):
def __init__(self, markerline_stemlines_baseline, **kwargs):
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
Container.__init__(self, markerline_stemlines_baseline, **kwargs)
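if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: pyplot's bar()
    # returns a BarContainer like the one defined above, which makes the
    # shared Container helpers easy to demonstrate.
    import matplotlib.pyplot as plt
    bars = plt.bar([0, 1, 2], [3, 1, 2], label="demo")
    print(type(bars).__name__)       # expected: BarContainer
    print(bars.get_label())          # expected: demo
    print(len(bars.get_children()))  # expected: 3 (one Rectangle per bar)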
| gpl-2.0 |
ajamesl/VectorTarget | plot.py | 1 | 2392 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
import numpy as np
from mpl_toolkits.mplot3d import proj3d
import csv
x = []
y = []
z = []
#Reading two sets of x, y, z coordinates from a txt file
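# Assumption (not stated in the original script): data.txt holds one
# comma-separated "x,y,z" integer triple per line, e.g. two lines such as
# "1,2,3" and "7,8,9" for the arrow's tail and head.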
with open('data.txt', 'r') as csvfile:
coords = csv.reader(csvfile, delimiter=',')
for row in coords:
x.append(int(row[0]))
y.append(int(row[1]))
z.append(int(row[2]))
#Class defining x, y, z vectors and the vector arrow-head appearance/size
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
#Defines figure as 3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#Axis range & labels
ax.set_xlim([0, 10])
ax.set_ylim([0, 10])
ax.set_zlim([0, 10])
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
a = Arrow3D(x, y, z, mutation_scale=20, lw=1, arrowstyle="->",
color="b")
#Draw line on plot
ax.add_artist(a)
plt.show()
#class Arrow3D(FancyArrowPatch):
# def __init__(self, xs, ys, zs, *args, **kwargs):
# FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
# self._verts3d = xs, ys, zs
#
# def draw(self, renderer):
# xs3d, ys3d, zs3d = self._verts3d
# xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
# self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
# FancyArrowPatch.draw(self, renderer)
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.set_xlim([0, 10])
#ax.set_ylim([0, 10])
#ax.set_zlim([0, 10])
#ax.set_xlabel('x axis')
#ax.set_ylabel('y axis')
#ax.set_zlabel('z axis')
#a = Arrow3D([5, 10], [0, 5], [3, 6], mutation_scale=20, lw=1, arrowstyle="->",
# color="b")
#b = Arrow3D([0, 10], [0, 2], [2, 4], mutation_scale=20, lw=1, arrowstyle="->",
# color="r")
#c = Arrow3D([5, 0], [10, 5], [10, 5], mutation_scale=20, lw=1, arrowstyle="->",
# color="g")
#ax.add_artist(a)
#ax.add_artist(b)
#ax.add_artist(c)
#plt.show()
| mit |
russel1237/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
syl20bnr/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
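# --- Illustrative self-check (not part of matplotlib) -----------------------
# A minimal NumPy sketch verifying that the Lambert azimuthal equal-area
# formulas used by LambertTransform and InvertedLambertTransform above are
# mutual inverses for a point away from the projection boundary. The sample
# coordinates and tolerance are arbitrary choices for the demo.
if __name__ == '__main__':
    clat, clong = 0.0, 0.0
    lat, lon = 0.3, 0.4
    # forward projection (same formulas as LambertTransform.transform)
    inner_k = (1.0 + np.sin(clat) * np.sin(lat) +
               np.cos(clat) * np.cos(lat) * np.cos(lon - clong))
    k = np.sqrt(2.0 / inner_k)
    x = k * np.cos(lat) * np.sin(lon - clong)
    y = k * (np.cos(clat) * np.sin(lat) -
             np.sin(clat) * np.cos(lat) * np.cos(lon - clong))
    # inverse projection (same formulas as InvertedLambertTransform.transform)
    p = np.sqrt(x * x + y * y)
    c = 2.0 * np.arcsin(0.5 * p)
    lat_back = np.arcsin(np.cos(c) * np.sin(clat) +
                         (y * np.sin(c) * np.cos(clat)) / p)
    lon_back = clong + np.arctan(
        (x * np.sin(c)) / (p * np.cos(clat) * np.cos(c) -
                           y * np.sin(clat) * np.sin(c)))
    assert np.allclose([lon_back, lat_back], [lon, lat], atol=1e-9)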
| gpl-3.0 |
arcyfelix/Courses | 18-11-22-Deep-Learning-with-PyTorch/02-Introduction to PyTorch/helper.py | 1 | 2719 | import matplotlib.pyplot as plt
import numpy as np
from torch import nn, optim
from torch.autograd import Variable
def test_network(net, trainloader):
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
dataiter = iter(trainloader)
images, labels = next(dataiter)  # builtin next() works across PyTorch versions; the old .next() method was removed
# Create Variables for the inputs and targets
inputs = Variable(images)
targets = Variable(images)
# Clear the gradients from all Variables
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = net.forward(inputs)
loss = criterion(output, targets)
loss.backward()
optimizer.step()
return True
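# A hypothetical smoke test for test_network (not part of the original course
# helper): it feeds random tensors shaped like flattened 28x28 images through a
# tiny autoencoder-style network. The sizes and the Sequential model are
# arbitrary choices for the demo.
if __name__ == '__main__':
    import torch
    from torch.utils.data import TensorDataset, DataLoader
    fake_images = torch.randn(64, 784)
    fake_labels = torch.zeros(64, dtype=torch.long)  # unused by test_network
    loader = DataLoader(TensorDataset(fake_images, fake_labels), batch_size=32)
    net = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 784))
    assert test_network(net, loader)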
def imshow(image, ax=None, title=None, normalize=True):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
image = image.numpy().transpose((1, 2, 0))
if normalize:
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
image = np.clip(image, 0, 1)
ax.imshow(image)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='both', length=0)
ax.set_xticklabels('')
ax.set_yticklabels('')
return ax
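# Hypothetical usage sketch (not in the original helper): display a random
# 3x64x64 tensor without the ImageNet un-normalization step. The tensor shape
# is an arbitrary demo choice.
if __name__ == '__main__':
    import torch
    ax_demo = imshow(torch.rand(3, 64, 64), normalize=False)
    plt.close(ax_demo.figure)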
def view_recon(img, recon):
''' Function for displaying an image (as a PyTorch Tensor) and its
reconstruction (also a PyTorch Tensor).
'''
fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)
axes[0].imshow(img.numpy().squeeze())
axes[1].imshow(recon.data.numpy().squeeze())
for ax in axes:
ax.axis('off')
ax.set_adjustable('box-forced')
def view_classify(img, ps, version="MNIST"):
''' Function for viewing an image and its predicted classes.
'''
ps = ps.data.numpy().squeeze()
fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())
ax1.axis('off')
ax2.barh(np.arange(10), ps)
ax2.set_aspect(0.1)
ax2.set_yticks(np.arange(10))
if version == "MNIST":
ax2.set_yticklabels(np.arange(10))
elif version == "Fashion":
ax2.set_yticklabels(['T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle Boot'], size='small');
ax2.set_title('Class Probability')
ax2.set_xlim(0, 1.1)
plt.tight_layout()
| apache-2.0 |
xuewei4d/scikit-learn | sklearn/manifold/_isomap.py | 11 | 9747 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(TransformerMixin, BaseEstimator):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : int, default=5
number of neighbors to consider for each point.
n_components : int, default=2
number of coordinates for the manifold
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float, default=0
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : int, default=None
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : {'auto', 'FW', 'D'}, default='auto'
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
default='auto'
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int or None, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
metric : string, or callable, default="minkowski"
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a :term:`Glossary <sparse graph>`.
.. versionadded:: 0.22
p : int, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
.. versionadded:: 0.22
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.22
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
:class:`~sklearn.decomposition.KernelPCA` object used to implement the
embedding.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import Isomap
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = Isomap(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
@_deprecate_positional_args
def __init__(self, *, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=None, metric='minkowski',
p=2, metric_params=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
self.metric = metric
self.p = p
self.metric_params = metric_params
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
metric=self.metric, p=self.p,
metric_params=self.metric_params,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.n_features_in_ = self.nbrs_.n_features_in_
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
metric=self.metric, p=self.p,
metric_params=self.metric_params,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse graph, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse graph, precomputed tree, or NearestNeighbors
object.
y : Ignored
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse graph, BallTree, KDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : array-like, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
"""
check_is_fitted(self)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to
# training data via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
n_samples_fit = self.nbrs_.n_samples_fit_
n_queries = distances.shape[0]
G_X = np.zeros((n_queries, n_samples_fit))
for i in range(n_queries):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
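# --- Illustrative check (not part of scikit-learn) --------------------------
# The isomap kernel quoted in reconstruction_error's docstring,
#     K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples),
# is exactly what KernelCenterer computes when applied to -0.5 * D**2, the same
# double-centering used in reconstruction_error and inside KernelPCA when
# _fit_transform hands it G = -0.5 * dist_matrix_ ** 2. The random data below
# is an arbitrary choice for the demo.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(6, 3)
    diff = X_demo[:, None, :] - X_demo[None, :, :]
    D = np.sqrt((diff ** 2).sum(-1))                  # pairwise distances
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n               # centering matrix
    K_manual = -0.5 * J.dot(D ** 2).dot(J)
    K_centered = KernelCenterer().fit_transform(-0.5 * D ** 2)
    assert np.allclose(K_manual, K_centered)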
| bsd-3-clause |
bgossele/geminicassandra | geminicassandra/scripts/gemini_install.py | 1 | 15544 | #!/usr/bin/env python
"""Installer for geminicassandra: a lightweight db framework for disease and population genetics.
https://github.com/bgossele/geminicassandra
Handles installation of:
- Required third party software
- Required Python libraries
- Gemini application
- Associated data files
Requires: Python 2.7 (or 2.6 and argparse), git, and compilers (gcc, g++)
Run gemini_install.py -h for usage.
"""
import argparse
import platform
import os
import shutil
import subprocess
import sys
import urllib2
remotes = {"requirements_pip":
"https://raw.github.com/bgossele/geminicassandra/master/requirements.txt",
"requirements_conda":
"",
"versioned_installations":
"https://raw.githubusercontent.com/bgossele/geminicassandra/master/versioning/",
"cloudbiolinux":
"https://github.com/chapmanb/cloudbiolinux.git",
"geminicassandra":
"https://github.com/bgossele/geminicassandra.git",
"anaconda":
"http://repo.continuum.io/miniconda/Miniconda-3.7.0-%s-x86_64.sh"}
def main(args):
check_dependencies()
work_dir = os.path.join(os.getcwd(), "tmpgemini_install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
if args.gemini_version != 'latest':
requirements_pip = os.path.join(remotes['versioned_installations'],
args.gemini_version, 'requirements_pip.txt')
requirements_conda = os.path.join(remotes['versioned_installations'],
args.gemini_version, 'requirements_conda.txt')
try:
urllib2.urlopen(requirements_pip)
except:
sys.exit('Gemini version %s could not be found. Try the latest version.' % args.gemini_version)
remotes.update({'requirements_pip': requirements_pip, 'requirements_conda': requirements_conda})
print "Installing isolated base python installation"
make_dirs(args)
anaconda = install_anaconda_python(args, remotes)
print "Installing geminicassandra..."
install_conda_pkgs(anaconda, remotes, args)
gemini = install_gemini(anaconda, remotes, args.datadir, args.tooldir, args.sudo)
if args.install_tools:
cbl = get_cloudbiolinux(remotes["cloudbiolinux"])
fabricrc = write_fabricrc(cbl["fabricrc"], args.tooldir, args.datadir, args.sudo)
print "Installing associated tools..."
install_tools(gemini["fab"], cbl["tool_fabfile"], fabricrc)
os.chdir(work_dir)
install_data(gemini["python"], gemini["data_script"], args)
os.chdir(work_dir)
test_script = install_testbase(args.datadir, remotes["geminicassandra"], gemini)
print "Finished: geminicassandra, tools and data installed"
print " Tools installed in:\n %s" % args.tooldir
print " Data installed in:\n %s" % args.datadir
print " Run tests with:\n cd %s && bash %s" % (os.path.dirname(test_script),
os.path.basename(test_script))
print " NOTE: be sure to add %s/bin to your PATH." % args.tooldir
print " NOTE: Install data files for GERP_bp & CADD_scores (not installed by default).\n "
shutil.rmtree(work_dir)
def install_gemini(anaconda, remotes, datadir, tooldir, use_sudo):
"""Install geminicassandra plus python dependencies inside isolated Anaconda environment.
"""
# Work around issue with distribute where asks for 'distribute==0.0'
# try:
# subprocess.check_call([anaconda["easy_install"], "--upgrade", "distribute"])
# except subprocess.CalledProcessError:
# try:
# subprocess.check_call([anaconda["pip"], "install", "--upgrade", "distribute"])
# except subprocess.CalledProcessError:
# pass
# Ensure latest version of fabric for running CloudBioLinux
subprocess.check_call([anaconda["pip"], "install", "fabric>=1.7.0"])
# allow downloads excluded in recent pip (1.5 or greater) versions
try:
p = subprocess.Popen([anaconda["pip"], "--version"], stdout=subprocess.PIPE)
pip_version = p.communicate()[0].split()[1]
except:
pip_version = ""
pip_compat = []
if pip_version >= "1.5":
for req in ["python-graph-core", "python-graph-dot"]:
pip_compat += ["--allow-external", req, "--allow-unverified", req]
subprocess.check_call([anaconda["pip"], "install"] + pip_compat + ["-r", remotes["requirements_pip"]])
python_bin = os.path.join(anaconda["dir"], "bin", "python")
_cleanup_problem_files(anaconda["dir"])
_add_missing_inits(python_bin)
for final_name, ve_name in [("geminicassandra", "geminicassandra"), ("gemini_python", "python"),
("gemini_pip", "pip")]:
final_script = os.path.join(tooldir, "bin", final_name)
ve_script = os.path.join(anaconda["dir"], "bin", ve_name)
sudo_cmd = ["sudo"] if use_sudo else []
if os.path.lexists(final_script):
subprocess.check_call(sudo_cmd + ["rm", "-f", final_script])
else:
subprocess.check_call(sudo_cmd + ["mkdir", "-p", os.path.dirname(final_script)])
cmd = ["ln", "-s", ve_script, final_script]
subprocess.check_call(sudo_cmd + cmd)
library_loc = check_output("%s -c 'import geminicassandra; print geminicassandra.__file__'" % python_bin,
shell=True)
return {"fab": os.path.join(anaconda["dir"], "bin", "fab"),
"data_script": os.path.join(os.path.dirname(library_loc.strip()), "install-data.py"),
"python": python_bin,
"cmd": os.path.join(anaconda["dir"], "bin", "geminicassandra")}
def install_conda_pkgs(anaconda, remotes, args):
if args.gemini_version != 'latest':
pkgs = ["--file", remotes['requirements_conda']]
else:
pkgs = ["bx-python", "conda", "cython", "ipython", "jinja2", "nose", "numpy",
"pip", "pycrypto", "pyparsing", "pysam", "pyyaml",
"pyzmq", "pandas", "scipy", "cassandra-driver", "blist"]
channels = ["-c", "https://conda.binstar.org/bcbio"]
subprocess.check_call([anaconda["conda"], "install", "--yes"] + channels + pkgs)
def install_anaconda_python(args, remotes):
"""Provide isolated installation of Anaconda python.
http://docs.continuum.io/anaconda/index.html
"""
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
conda = os.path.join(bindir, "conda")
if platform.mac_ver()[0]:
distribution = "macosx"
else:
distribution = "linux"
if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
if os.path.exists(anaconda_dir):
shutil.rmtree(anaconda_dir)
url = remotes["anaconda"] % ("MacOSX" if distribution == "macosx" else "Linux")
if not os.path.exists(os.path.basename(url)):
subprocess.check_call(["wget", url])
subprocess.check_call("bash %s -b -p %s" %
(os.path.basename(url), anaconda_dir), shell=True)
return {"conda": conda,
"pip": os.path.join(bindir, "pip"),
"easy_install": os.path.join(bindir, "easy_install"),
"dir": anaconda_dir}
def _add_missing_inits(python_bin):
"""pip/setuptools strips __init__.py files with namespace declarations.
I have no idea why, but this adds them back.
"""
library_loc = check_output("%s -c 'import pygraph.classes.graph; "
"print pygraph.classes.graph.__file__'" % python_bin,
shell=True)
pygraph_init = os.path.normpath(os.path.join(os.path.dirname(library_loc.strip()), os.pardir,
"__init__.py"))
if not os.path.exists(pygraph_init):
with open(pygraph_init, "w") as out_handle:
out_handle.write("__import__('pkg_resources').declare_namespace(__name__)\n")
def _cleanup_problem_files(venv_dir):
"""Remove problem bottle items in PATH which conflict with site-packages
"""
for cmd in ["bottle.py", "bottle.pyc"]:
bin_cmd = os.path.join(venv_dir, "bin", cmd)
if os.path.exists(bin_cmd):
os.remove(bin_cmd)
def install_tools(fab_cmd, fabfile, fabricrc):
"""Install 3rd party tools used by Gemini using a custom CloudBioLinux flavor.
"""
tools = ["tabix", "grabix", "samtools", "bedtools"]
flavor_dir = os.path.join(os.getcwd(), "geminicassandra-flavor")
if not os.path.exists(flavor_dir):
os.makedirs(flavor_dir)
with open(os.path.join(flavor_dir, "main.yaml"), "w") as out_handle:
out_handle.write("packages:\n")
out_handle.write(" - bio_nextgen\n")
out_handle.write("libraries:\n")
with open(os.path.join(flavor_dir, "custom.yaml"), "w") as out_handle:
out_handle.write("bio_nextgen:\n")
for tool in tools:
out_handle.write(" - %s\n" % tool)
cmd = [fab_cmd, "-f", fabfile, "-H", "localhost", "-c", fabricrc,
"install_biolinux:target=custom,flavor=%s" % flavor_dir]
subprocess.check_call(cmd)
def install_data(python_cmd, data_script, args):
"""Install biological data used by geminicassandra.
"""
data_dir = os.path.join(args.datadir, "gemini_data") if args.sharedpy else args.datadir
cmd = [python_cmd, data_script, data_dir]
if args.install_data:
print "Installing geminicassandra data..."
else:
cmd.append("--nodata")
subprocess.check_call(cmd)
def install_testbase(datadir, repo, gemini):
"""Clone or update geminicassandra code so we have the latest test suite.
"""
gemini_dir = os.path.join(datadir, "geminicassandra")
cur_dir = os.getcwd()
needs_git = True
if os.path.exists(gemini_dir):
os.chdir(gemini_dir)
try:
subprocess.check_call(["git", "pull", "origin", "master", "--tags"])
needs_git = False
except:
os.chdir(cur_dir)
shutil.rmtree(gemini_dir)
if needs_git:
os.chdir(os.path.split(gemini_dir)[0])
subprocess.check_call(["git", "clone", repo])
os.chdir(gemini_dir)
_update_testdir_revision(gemini["cmd"])
os.chdir(cur_dir)
return os.path.join(gemini_dir, "master-test.sh")
def _update_testdir_revision(gemini_cmd):
"""Update test directory to be in sync with a tagged installed version or development.
"""
try:
p = subprocess.Popen([gemini_cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
gversion = p.communicate()[0].split()[1]
except:
gversion = ""
tag = ""
if gversion:
try:
p = subprocess.Popen("git tag -l | grep %s" % gversion, stdout=subprocess.PIPE, shell=True)
tag = p.communicate()[0].strip()
except:
tag = ""
if tag:
subprocess.check_call(["git", "checkout", "tags/%s" % tag])
else:
subprocess.check_call(["git", "reset", "--hard", "HEAD"])
def write_fabricrc(base_file, tooldir, datadir, use_sudo):
out_file = os.path.join(os.getcwd(), os.path.basename(base_file))
with open(base_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("system_install"):
line = "system_install = %s\n" % tooldir
elif line.startswith("local_install"):
line = "local_install = %s/install\n" % tooldir
elif line.startswith("data_files"):
line = "data_files = %s\n" % datadir
elif line.startswith("use_sudo"):
line = "use_sudo = %s\n" % use_sudo
elif line.startswith("edition"):
line = "edition = minimal\n"
elif line.startswith("#galaxy_home"):
line = "galaxy_home = %s\n" % os.path.join(datadir, "galaxy")
out_handle.write(line)
return out_file
def make_dirs(args):
sudo_cmd = ["sudo"] if args.sudo else []
for dname in [args.datadir, args.tooldir]:
if not os.path.exists(dname):
subprocess.check_call(sudo_cmd + ["mkdir", "-p", dname])
username = check_output("echo $USER", shell=True).strip()
subprocess.check_call(sudo_cmd + ["chown", username, dname])
def get_cloudbiolinux(repo):
base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
if not os.path.exists(base_dir):
subprocess.check_call(["git", "clone", repo])
return {"fabricrc": os.path.join(base_dir, "config", "fabricrc.txt"),
"tool_fabfile": os.path.join(base_dir, "fabfile.py")}
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print "Checking required dependencies..."
for cmd, url in [("git", "http://git-scm.com/"),
("wget", "http://www.gnu.org/software/wget/"),
("curl", "http://curl.haxx.se/")]:
try:
retcode = subprocess.call([cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
retcode = 127
if retcode == 127:
raise OSError("geminicassandra requires %s (%s)" % (cmd, url))
else:
print " %s found" % cmd
def check_output(*popenargs, **kwargs):
"""python2.6 compatible version of check_output.
Thanks to:
https://github.com/stackforge/bindep/blob/master/bindep/support_py26.py
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
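# Hypothetical usage sketch (not part of the installer): the Python 2.6
# backport above behaves like subprocess.check_output. It runs only when this
# file is executed directly, before the real entry point below.
if __name__ == "__main__":
    assert check_output("echo gemini_install self-test", shell=True).strip() != ""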
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Automated installer for geminicassandra framework.")
parser.add_argument("tooldir", help="Directory to install 3rd party software tools",
type=os.path.abspath)
parser.add_argument("datadir", help="Directory to install geminicassandra data files",
type=os.path.abspath)
parser.add_argument("--geminicassandra-version", dest="gemini_version", default="latest",
help="Install one specific geminicassandra version with a fixed dependency chain.")
parser.add_argument("--nosudo", help="Specify we cannot use sudo for commands",
dest="sudo", action="store_false", default=True)
parser.add_argument("--notools", help="Do not install tool dependencies",
dest="install_tools", action="store_false", default=True)
parser.add_argument("--nodata", help="Do not install data dependencies",
dest="install_data", action="store_false", default=True)
parser.add_argument("--sharedpy", help=("Indicate we share an Anaconda Python directory with "
"another project. Creates unique geminicassandra data directory."),
action="store_true", default=False)
if len(sys.argv) == 1:
parser.print_help()
else:
main(parser.parse_args())
| mit |
pv/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various Bayesian regression methods
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
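# --- Illustrative sketch (not part of scikit-learn) --------------------------
# A tiny, self-contained demo of BayesianRidge on a nearly noiseless linear
# problem; the true weights, noise level and tolerances are arbitrary choices.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 3)
    w_true = np.array([1.5, -2.0, 0.5])
    y_demo = X_demo.dot(w_true) + 1e-3 * rng.randn(50)
    br = BayesianRidge(compute_score=True).fit(X_demo, y_demo)
    assert np.allclose(br.coef_, w_true, atol=0.05)
    # the estimated noise precision alpha_ should be large for low-noise data
    assert br.alpha_ > 1e3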
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (evidence maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
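# --- Illustrative sketch (not part of scikit-learn) --------------------------
# ARD should drive the weights of irrelevant features toward zero. The design,
# true weights, noise level and thresholds below are arbitrary demo choices.
if __name__ == "__main__":
    rng = np.random.RandomState(1)
    X_demo = rng.randn(100, 5)
    w_true = np.array([2.0, 0.0, 0.0, -3.0, 0.0])
    y_demo = X_demo.dot(w_true) + 0.01 * rng.randn(100)
    ard = ARDRegression().fit(X_demo, y_demo)
    assert np.allclose(ard.coef_[[0, 3]], [2.0, -3.0], atol=0.1)
    assert np.all(np.abs(ard.coef_[[1, 2, 4]]) < 0.1)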
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a common technique
consists in repeating the classification procedure after randomly permuting
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
cancan101/tensorflow | tensorflow/examples/learn/wide_n_deep_tutorial.py | 24 | 8941 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from six.moves import urllib
import pandas as pd
import tensorflow as tf
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
# Sparse base columns.
gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
keys=["female", "male"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
# Transformations.
age_buckets = tf.contrib.layers.bucketized_column(age,
boundaries=[
18, 25, 30, 35, 40, 45,
50, 55, 60, 65
])
# Wide columns and deep columns.
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column(
[age_buckets, education, occupation],
hash_bucket_size=int(1e6)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country,
dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
if model_type == "wide":
m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
feature_columns=wide_columns)
elif model_type == "deep":
m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(df):
"""Input builder function."""
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
df_train = pd.read_csv(
tf.gfile.Open(train_file_name),
names=COLUMNS,
skipinitialspace=True,
engine="python")
df_test = pd.read_csv(
tf.gfile.Open(test_file_name),
names=COLUMNS,
skipinitialspace=True,
skiprows=1,
engine="python")
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
print("model directory = %s" % model_dir)
m = build_estimator(model_dir, model_type)
m.fit(input_fn=lambda: input_fn(df_train), steps=train_steps)
results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=200,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
wzbozon/statsmodels | statsmodels/graphics/tsaplots.py | 16 | 10392 | """Correlation plot functions."""
import numpy as np
from statsmodels.graphics import utils
from statsmodels.tsa.stattools import acf, pacf
def plot_acf(x, ax=None, lags=None, alpha=.05, use_vlines=True, unbiased=False,
fft=False, **kwargs):
"""Plot the autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : array_like, optional
Array of lag values, used on horizontal axis.
If not given, ``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett's formula. If None, no confidence intervals are plotted.
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
fft : bool, optional
If True, computes the ACF via FFT.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
if lags is None:
lags = np.arange(len(x))
nlags = len(lags) - 1
else:
nlags = lags
lags = np.arange(lags + 1) # +1 for zero lag
confint = None
# acf has different return type based on alpha
if alpha is None:
acf_x = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
else:
acf_x, confint = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title("Autocorrelation")
if confint is not None:
# center the confidence interval TODO: do in acf?
ax.fill_between(lags, confint[:,0] - acf_x, confint[:,1] - acf_x, alpha=.25)
return fig
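# Hypothetical usage sketch (not from statsmodels): for white noise the sample
# autocorrelations beyond lag 0 should stay small, so most points fall inside
# the plotted confidence band. The series length and lag count are arbitrary.
if __name__ == "__main__":
    noise = np.random.RandomState(42).randn(200)
    fig_demo = plot_acf(noise, lags=20, alpha=.05)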
def plot_pacf(x, ax=None, lags=None, alpha=.05, method='ywm',
use_vlines=True, **kwargs):
"""Plot the partial autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : array_like, optional
Array of lag values, used on horizontal axis.
If not given, ``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
method : str, default 'ywm' (Yule-Walker without bias correction)
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
if lags is None:
lags = np.arange(len(x))
nlags = len(lags) - 1
else:
nlags = lags
lags = np.arange(lags + 1) # +1 for zero lag
confint = None
if alpha is None:
acf_x = pacf(x, nlags=nlags, alpha=alpha, method=method)
else:
acf_x, confint = pacf(x, nlags=nlags, alpha=alpha, method=method)
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
# center the confidence interval TODO: do in acf?
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title("Partial Autocorrelation")
if confint is not None:
# center the confidence interval TODO: do in acf?
ax.fill_between(lags, confint[:,0] - acf_x, confint[:,1] - acf_x, alpha=.25)
return fig
def seasonal_plot(grouped_x, xticklabels, ylabel=None, ax=None):
"""
Consider using one of month_plot or quarter_plot unless you need
irregular plotting.
Parameters
----------
grouped_x : iterable of DataFrames
Should be a GroupBy object (or similar pair of group_names and groups
as DataFrames) with a DatetimeIndex or PeriodIndex
"""
fig, ax = utils.create_mpl_ax(ax)
start = 0
ticks = []
for season, df in grouped_x:
df = df.copy()  # copy first, otherwise sort() balks for a Series; there may be a better way
df.sort()
nobs = len(df)
x_plot = np.arange(start, start + nobs)
ticks.append(x_plot.mean())
ax.plot(x_plot, df.values, 'k')
ax.hlines(df.values.mean(), x_plot[0], x_plot[-1], colors='k')
start += nobs
ax.set_xticks(ticks)
ax.set_xticklabels(xticklabels)
ax.set_ylabel(ylabel)
ax.margins(.1, .05)
return fig
def month_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of monthly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
with a PeriodIndex or DatetimeIndex with a monthly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.elnino.load_pandas().data
>>> dta['YEAR'] = dta.YEAR.astype(int).astype(str)
>>> dta = dta.set_index('YEAR').T.unstack()
>>> dates = map(lambda x : pd.datetools.parse('1 '+' '.join(x)),
... dta.index.values)
>>> dta.index = pd.DatetimeIndex(dates, freq='M')
>>> fig = sm.graphics.tsa.month_plot(dta)
.. plot:: plots/graphics_month_plot.py
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="M")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="M"))
xticklabels = ['j','f','m','a','m','j','j','a','s','o','n','d']
return seasonal_plot(x.groupby(lambda y : y.month), xticklabels,
ylabel=ylabel, ax=ax)
def quarter_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of quarterly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
with a PeriodIndex or DatetimeIndex with a quarterly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="Q")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="Q"))
xticklabels = ['q1', 'q2', 'q3', 'q4']
return seasonal_plot(x.groupby(lambda y : y.quarter), xticklabels,
ylabel=ylabel, ax=ax)
if __name__ == "__main__":
import pandas as pd
#R code to run to load that dataset in this directory
#data(co2)
#library(zoo)
#write.csv(as.data.frame(list(date=as.Date(co2), co2=coredata(co2))), "co2.csv", row.names=FALSE)
co2 = pd.read_csv("co2.csv", index_col=0, parse_dates=True)
month_plot(co2.co2)
#will work when dates are sorted
#co2 = sm.datasets.get_rdataset("co2", cache=True)
x = pd.Series(np.arange(20),
index=pd.PeriodIndex(start='1/1/1990', periods=20, freq='Q'))
quarter_plot(x)
| bsd-3-clause |
loli/semisupervisedforests | sklearn/cluster/tests/test_spectral.py | 11 | 7958 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
"""Histogram kernel implemented as a callable."""
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    # Test discretize using a noisy assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
            # noisy class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
nsat/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
        self._fs = 1000 # per-channel (output) sample rate
self._M = M = 9 # Number of channels to channelize
        self._ifs = M*self._fs # input sample rate into the channelizer
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
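            # With M = 9 and Python 2 integer division, (M/2 - M + i + 1)
            # evaluates to i - 4, so tone i is placed i - 4 channels (-4..+4)
            # away from DC; freqs[i] adds a small offset inside that channel.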
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
procoder317/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
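# A minimal sketch (not part of the benchmark) of the incremental alternative
# to tuning batch_size: stream fixed-size chunks through partial_fit. The
# helper name, chunk size and synthetic data are illustrative only.
def _partial_fit_sketch():
    rng = nr.RandomState(0)
    X = rng.randn(10000, 2)
    mbk = MiniBatchKMeans(n_clusters=8)
    # feed the data 1000 samples at a time
    for start in range(0, len(X), 1000):
        mbk.partial_fit(X[start:start + 1000])
    return mbk.cluster_centers_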
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
jorik041/scikit-learn | sklearn/svm/tests/test_svm.py | 116 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling `srand` through this wrapper concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
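# A minimal sketch (not one of the original tests) restating the precomputed
# kernel recipe exercised above: fit on the square train Gram matrix, then
# predict from the rectangular Gram matrix between test and *training*
# samples. The helper name is illustrative only.
def _precomputed_kernel_sketch():
    X_train = np.array(X, dtype=float)
    X_test = np.array(T, dtype=float)
    clf = svm.SVC(kernel='precomputed')
    # n_train x n_train Gram matrix (plain linear kernel)
    K_train = np.dot(X_train, X_train.T)
    clf.fit(K_train, Y)
    # n_test x n_train Gram matrix against the training samples
    K_test = np.dot(X_test, X_train.T)
    return clf.predict(K_test)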
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVR() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the samples from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
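# A small worked example (not an original test) of the weighting rule that
# class_weight='balanced' is documented to apply:
# weight(c) = n_samples / (n_classes * n_samples_in_class(c)).
# The helper name and the toy labels are illustrative only.
def _balanced_weight_sketch():
    y = np.array([1, 1, 1, 1, 2, 2])
    classes = np.unique(y)
    counts = np.bincount(y)[classes]
    # 6 samples, 2 classes -> 6/(2*4) = 0.75 for class 1, 6/(2*2) = 1.5 for class 2
    weights = len(y) / (len(classes) * counts.astype(float))
    return dict(zip(classes, weights))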
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
blondegeek/pymatgen | pymatgen/analysis/phase_diagram.py | 2 | 83137 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
import collections
import itertools
import math
import logging
from monty.json import MSONable, MontyDecoder
from functools import lru_cache
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
"""
This module defines tools to generate and analyze phase diagrams.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2011"
logger = logging.getLogger(__name__)
class PDEntry(MSONable):
"""
An object encompassing all relevant data for phase diagrams.
.. attribute:: composition
The composition associated with the PDEntry.
.. attribute:: energy
The energy associated with the entry.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
.. attribute:: attribute
        An arbitrary attribute.
Args:
composition (Composition): Composition
energy (float): Energy for composition.
name (str): Optional parameter to name the entry. Defaults to the
reduced chemical formula.
attribute: Optional attribute of the entry. This can be used to
specify that the entry is a newly found compound, or to specify a
particular label for the entry, or else ... Used for further
analysis and plotting purposes. An attribute can be anything
but must be MSONable.
"""
def __init__(self, composition: Composition, energy: float,
name: str = None, attribute: object = None):
self.energy = energy
self.composition = Composition(composition)
self.name = name if name else self.composition.reduced_formula
self.attribute = attribute
@property
def energy_per_atom(self):
"""
Returns the final energy per atom.
"""
return self.energy / self.composition.num_atoms
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.composition.is_element
def __repr__(self):
return "PDEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"composition": self.composition.as_dict(),
"energy": self.energy,
"name": self.name,
"attribute": self.attribute}
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.as_dict() == other.as_dict()
else:
return False
def __hash__(self):
return id(self)
@classmethod
def from_dict(cls, d):
return cls(Composition(d["composition"]), d["energy"],
d["name"] if "name" in d else None,
d["attribute"] if "attribute" in d else None)
class GrandPotPDEntry(PDEntry):
"""
A grand potential pd entry object encompassing all relevant data for phase
    diagrams. Chemical potentials are given as an element-chemical potential
dict.
Args:
entry: A PDEntry-like object.
chempots: Chemical potential specification as {Element: float}.
name: Optional parameter to name the entry. Defaults to the reduced
chemical formula of the original entry.
"""
def __init__(self, entry, chempots, name=None):
comp = entry.composition
self.original_entry = entry
self.original_comp = comp
grandpot = entry.energy - sum([comp[el] * pot
for el, pot in chempots.items()])
self.chempots = chempots
new_comp_map = {el: comp[el] for el in comp.elements
if el not in chempots}
super().__init__(new_comp_map, grandpot, entry.name)
self.name = name if name else entry.name
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.original_comp.is_element
def __repr__(self):
chempot_str = " ".join(["mu_%s = %.4f" % (el, mu)
for el, mu in self.chempots.items()])
return "GrandPotPDEntry with original composition " + \
"{}, energy = {:.4f}, {}".format(self.original_entry.composition,
self.original_entry.energy,
chempot_str)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"chempots": {el.symbol: u for el, u in self.chempots.items()},
"name": self.name}
@classmethod
def from_dict(cls, d):
chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
entry = MontyDecoder().process_decoded(d["entry"])
return cls(entry, chempots, d["name"])
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
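# A short worked sketch (not part of the library) of the grand-potential
# transformation performed in GrandPotPDEntry.__init__: the open element is
# dropped from the composition and its chemical potential is subtracted,
# E_grand = E - sum_el(n_el * mu_el). The numbers are illustrative only.
def _grand_pot_sketch():
    entry = PDEntry(Composition("Li2O"), -6.0)
    chempots = {Element("O"): -5.0}
    gp = GrandPotPDEntry(entry, chempots)
    # grand potential: -6.0 - (1 * -5.0) = -1.0, with O removed from the composition
    assert abs(gp.energy - (-1.0)) < 1e-12
    assert [e.symbol for e in gp.composition.elements] == ["Li"]
    return gp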
class TransformedPDEntry(PDEntry):
"""
    This class represents a TransformedPDEntry, which allows a PDEntry to be
transformed to a different composition coordinate space. It is used in the
construction of phase diagrams that do not have elements as the terminal
compositions.
Args:
comp (Composition): Transformed composition as a Composition.
original_entry (PDEntry): Original entry that this entry arose from.
"""
def __init__(self, comp, original_entry):
super().__init__(comp, original_entry.energy)
self.original_entry = original_entry
self.name = original_entry.name
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
def __repr__(self):
output = ["TransformedPDEntry {}".format(self.composition),
" with original composition {}"
.format(self.original_entry.composition),
", E = {:.4f}".format(self.original_entry.energy)]
return "".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"composition": self.composition}
@classmethod
def from_dict(cls, d):
entry = MontyDecoder().process_decoded(d["entry"])
return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
    .. attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
    .. attribute: simplexes:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
                the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = list(elements)
dim = len(elements)
get_reduced_comp = lambda e: e.composition.reduced_composition
entries = sorted(entries, key=get_reduced_comp)
el_refs = {}
min_entries = []
all_entries = []
for c, g in itertools.groupby(entries, key=get_reduced_comp):
g = list(g)
min_entry = min(g, key=lambda e: e.energy_per_atom)
if c.is_element:
el_refs[c.elements[0]] = min_entry
min_entries.append(min_entry)
all_entries.extend(g)
if len(el_refs) != dim:
raise PhaseDiagramError(
"There are no entries associated with a terminal element!.")
data = np.array([
[e.composition.get_atomic_fraction(el) for el in elements] + [
e.energy_per_atom]
for e in min_entries
])
# Use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()
# Add the elemental references
inds.extend([min_entries.index(el) for el in el_refs.values()])
qhull_entries = [min_entries[i] for i in inds]
qhull_data = data[inds][:, 1:]
# Add an extra point to enforce full dimensionality.
# This point will be present in all upper hull facets.
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
# Skip facets that include the extra point
if max(facet) == len(qhull_data) - 1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = all_entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
self._stable_entries = set(self.qhull_entries[i] for i in
set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = [MontyDecoder().process_decoded(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
@lru_cache(1)
def _get_facet_and_simplex(self, comp):
"""
Get any facet that a composition falls into. Cached so successive
calls at same composition are fast.
"""
c = self.pd_coords(comp)
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
return f, s
raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
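# Hypothetical sketch: decomposition and hull distance for a candidate entry
# (`pd` and `candidate_entry` are assumed to exist):
#
#     decomp, e_hull = pd.get_decomp_and_e_above_hull(candidate_entry)
#     print(e_hull)                                # 0 for stable entries, > 0 otherwise
#     print(pd.get_e_above_hull(candidate_entry))  # same value, decomposition discarded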
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry: A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0.
"""
if entry not in self.stable_entries:
raise ValueError("Equilibrium reaction energy is available only "
"for stable entries.")
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry,
allow_negative=True)[1]
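# Hypothetical sketch: equilibrium reaction energies over all stable,
# non-elemental phases of an existing PhaseDiagram `pd`:
#
#     for entry in pd.stable_entries:
#         if not entry.is_element:
#             print(entry.name, pd.get_equilibrium_reaction_energy(entry))  # <= 0 eV/atom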
def get_composition_chempots(self, comp):
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_all_chempots(self, comp):
# Note: this mirrors _get_facet_and_simplex, but collects every facet that
# contains the composition rather than returning only the first match.
c = self.pd_coords(comp)
allfacets = []
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
allfacets.append(f)
if not len(allfacets):
raise RuntimeError("No facets found for comp = {}".format(comp))
else:
chempots = {}
for facet in allfacets:
facet_elt_list = [self.qhull_entries[j].name for j in facet]
facet_name = '-'.join(facet_elt_list)
chempots[facet_name] = self._get_facet_chempots(facet)
return chempots
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
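# Hypothetical sketch, assuming Li2O and P2O5 both lie inside the diagram `pd`:
#
#     tieline = pd.get_critical_compositions(Composition("Li2O"), Composition("P2O5"))
#     for comp in tieline:                    # endpoints plus every facet crossing
#         print(comp.reduced_formula, comp.num_atoms)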
def get_element_profile(self, element, comp, comp_tol=1e-5):
"""
Provides the element evolution data for a composition.
For example, can be used to analyze Li conversion voltages by varying
uLi and looking at the phases formed. Also can be used to analyze O2
evolution by varying uO2.
Args:
element: An element. Must be in the phase diagram.
comp: A Composition
comp_tol: The tolerance to use when calculating decompositions.
Phases with amounts less than this tolerance are excluded.
Defaults to 1e-5.
Returns:
Evolution data as a list of dictionaries of the following format:
[ {'chempot': -10.487582010000001, 'evolution': -2.0,
'reaction': Reaction Object}, ...]
"""
element = get_el_sp(element)
element = Element(element.symbol)
if element not in self.elements:
raise ValueError("get_element_profile can only be called with"
" elements in the phase diagram.")
gccomp = Composition({el: amt for el, amt in comp.items()
if el != element})
elref = self.el_refs[element]
elcomp = Composition(element.symbol)
evolution = []
for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
decomp_entries = self.get_decomposition(cc).keys()
decomp = [k.composition for k in decomp_entries]
rxn = Reaction([comp], decomp + [elcomp])
rxn.normalize_to(comp)
c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
evolution.append({'chempot': c,
'evolution': amt,
'element_reference': elref,
'reaction': rxn, 'entries': decomp_entries})
return evolution
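# Hypothetical sketch: oxygen evolution profile of LiFePO4 (composition is
# illustrative; `pd` assumed to contain the Li-Fe-P-O system):
#
#     profile = pd.get_element_profile(Element("O"), Composition("LiFePO4"))
#     for step in profile:
#         print(step["chempot"], step["evolution"], step["reaction"])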
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
"""
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases wrt uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom
for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if len(all_chempots) > len(self.elements):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if len(common_ent_ind) == len(elements):
common_entries = [pd.qhull_entries[i]
for i in common_ent_ind]
data = np.array([[all_chempots[i][j]
- el_energies[pd.elements[j]]
for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
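# Hypothetical sketch: chemical potential ranges of every stable phase with
# respect to Li and O (both elements assumed present in `pd`):
#
#     ranges = pd.get_chempot_range_map([Element("Li"), Element("O")])
#     for entry, simplices in ranges.items():
#         print(entry.name, len(simplices))   # number of bounding simplices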
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
"""
returns a set of chemical potentials corresponding to the vertices of
the simplex in the chemical potential phase diagram.
The simplex is built using all elements in the target_composition
except dep_elt.
The chemical potential of dep_elt is computed from the target
composition energy.
This method is useful to get the limiting conditions for
defects computations for instance.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed
from the energy of
the stable phase at the target composition
tol_en: energy tolerance used to decide whether two vertices are duplicates
Returns:
[{Element:mu}]: An array of conditions on simplex vertices for
which each element has a chemical potential set to a given
value. "absolute" values (i.e., not referenced to element energies)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != dep_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != dep_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if e != dep_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = v[i] + muref[i]
res[dep_elt] = (np.dot(v + muref, coeff) + ef) / \
target_comp[dep_elt]
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if abs(di[k] - res[k]) > tol_en:
dict_equals = False
break
if dict_equals:
already_in = True
break
if not already_in:
all_coords.append(res)
return all_coords
def get_chempot_range_stability_phase(self, target_comp, open_elt):
"""
returns a set of chemical potentials corresponding to the max and min
chemical potential of the open element for a given composition. It is
quite common to have, for instance, a ternary oxide (e.g., ABO3) for
which you want to know which A and B chemical potentials lead
to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != open_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != open_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != open_elt]
max_open = -float('inf')
min_open = float('inf')
max_mus = None
min_mus = None
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[open_elt] / target_comp[open_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
all_coords.append(v)
if (np.dot(v + muref, coeff) + ef) / target_comp[
open_elt] > max_open:
max_open = (np.dot(v + muref, coeff) + ef) / \
target_comp[open_elt]
max_mus = v
if (np.dot(v + muref, coeff) + ef) / target_comp[
open_elt] < min_open:
min_open = (np.dot(v + muref, coeff) + ef) / \
target_comp[open_elt]
min_mus = v
elts = [e for e in self.elements if e != open_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
res[open_elt] = (min_open, max_open)
return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
"""
A class representing a Grand potential phase diagram. Grand potential phase
diagrams are essentially phase diagrams that are open to one or more
components. To construct such phase diagrams, the relevant free energy is
the grand potential, which can be written as the Legendre transform of the
Gibbs free energy as follows
Grand potential = G - u_X N_X
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
"""
def __init__(self, entries, chempots, elements=None):
"""
Standard constructor for grand potential phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
chempots {Element: float}: Specify the chemical potentials
of the open elements.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
elements = set(elements).difference(self.chempots.keys())
all_entries = []
for e in entries:
if len(set(e.composition.elements).intersection(set(elements))) > 0:
all_entries.append(GrandPotPDEntry(e, self.chempots))
super().__init__(all_entries, elements)
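# Hypothetical sketch: the same entries, but open to oxygen at an arbitrary
# chemical potential of -5.0 eV/atom (value purely illustrative):
#
#     gp_pd = GrandPotentialPhaseDiagram(entries, {Element("O"): -5.0})
#     print(gp_pd)    # stable phases at this oxygen chemical potential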
def __str__(self):
output = []
chemsys = "-".join([el.symbol for el in self.elements])
output.append("{} grand potential phase diagram with ".format(chemsys))
output[-1] += ", ".join(["u{}={}".format(el, v)
for el, v in self.chempots.items()])
output.append("{} stable phases: ".format(len(self.stable_entries)))
output.append(", ".join([entry.name
for entry in self.stable_entries]))
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"chempots": self.chempots,
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = MontyDecoder().process_decoded(d["all_entries"])
elements = MontyDecoder().process_decoded(d["elements"])
return cls(entries, d["chempots"], elements)
class CompoundPhaseDiagram(PhaseDiagram):
"""
Generates phase diagrams from compounds as terminations instead of
elements.
"""
# Tolerance for determining if amount of a composition is positive.
amount_tol = 1e-5
def __init__(self, entries, terminal_compositions,
normalize_terminal_compositions=True):
"""
Initializes a CompoundPhaseDiagram.
Args:
entries ([PDEntry]): Sequence of input entries. For example,
if you want a Li2O-P2O5 phase diagram, you might have all
Li-P-O entries as an input.
terminal_compositions ([Composition]): Terminal compositions of
phase space. In the Li2O-P2O5 example, these will be the
Li2O and P2O5 compositions.
normalize_terminal_compositions (bool): Whether to normalize the
terminal compositions to a per atom basis. If normalized,
the energy above hulls will be consistent
for comparison across systems. Non-normalized terminals are
more intuitive in terms of compositional breakdowns.
"""
self.original_entries = entries
self.terminal_compositions = terminal_compositions
self.normalize_terminals = normalize_terminal_compositions
(pentries, species_mapping) = \
self.transform_entries(entries, terminal_compositions)
self.species_mapping = species_mapping
super().__init__(
pentries, elements=species_mapping.values())
def transform_entries(self, entries, terminal_compositions):
"""
Method to transform all entries to the composition coordinate in the
terminal compositions. If the entry does not fall within the space
defined by the terminal compositions, they are excluded. For example,
Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
compositions are represented by DummySpecies.
Args:
entries: Sequence of all input entries
terminal_compositions: Terminal compositions of phase space.
Returns:
Sequence of TransformedPDEntries falling within the phase space.
"""
new_entries = []
if self.normalize_terminals:
fractional_comp = [c.fractional_composition
for c in terminal_compositions]
else:
fractional_comp = terminal_compositions
# Map terminal compositions to unique dummy species.
sp_mapping = collections.OrderedDict()
for i, comp in enumerate(fractional_comp):
sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
for entry in entries:
try:
rxn = Reaction(fractional_comp, [entry.composition])
rxn.normalize_to(entry.composition)
# We only allow reactions that have positive amounts of
# reactants.
if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
for comp in fractional_comp]):
newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
for comp in fractional_comp}
newcomp = {k: v for k, v in newcomp.items()
if v > CompoundPhaseDiagram.amount_tol}
transformed_entry = \
TransformedPDEntry(Composition(newcomp), entry)
new_entries.append(transformed_entry)
except ReactionError:
# If the reaction can't be balanced, the entry does not fall
# into the phase space. We ignore them.
pass
return new_entries, sp_mapping
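# Hypothetical sketch, assuming `li_p_o_entries` holds Li-P-O PDEntry objects:
#
#     cpd = CompoundPhaseDiagram(li_p_o_entries,
#                                [Composition("Li2O"), Composition("P2O5")])
#     # e.g. Li3PO4 is re-expressed as 1.5 Li2O + 0.5 P2O5 on dummy-species axes
#     print(cpd.stable_entries)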
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"original_entries": [e.as_dict() for e in self.original_entries],
"terminal_compositions": [c.as_dict()
for c in self.terminal_compositions],
"normalize_terminal_compositions":
self.normalize_terminals}
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
entries = dec.process_decoded(d["original_entries"])
terminal_compositions = dec.process_decoded(d["terminal_compositions"])
return cls(entries, terminal_compositions,
d["normalize_terminal_compositions"])
class ReactionDiagram:
def __init__(self, entry1, entry2, all_entries, tol=1e-4,
float_fmt="%.4f"):
"""
Analyzes the possible reactions between a pair of compounds, e.g.,
an electrolyte and an electrode.
Args:
entry1 (ComputedEntry): Entry for 1st component. Note that
corrections, if any, must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
if a particular entry is fitted to experimental data (such
as the EC molecule).
entry2 (ComputedEntry): Entry for 2nd component. Note that
corrections must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
if a particular entry is fitted to experimental data (such
as the EC molecule).
all_entries ([ComputedEntry]): All other entries to be
considered in the analysis. Note that corrections, if any,
must already be pre-applied.
tol (float): Tolerance to be used to determine validity of reaction.
float_fmt (str): Formatting string to be applied to all floats.
Determines number of decimal places in reaction string.
"""
elements = set()
for e in [entry1, entry2]:
elements.update([el.symbol for el in e.composition.elements])
elements = tuple(elements) # Fix elements to ensure order.
comp_vec1 = np.array([entry1.composition.get_atomic_fraction(el)
for el in elements])
comp_vec2 = np.array([entry2.composition.get_atomic_fraction(el)
for el in elements])
r1 = entry1.composition.reduced_composition
r2 = entry2.composition.reduced_composition
logger.debug("%d total entries." % len(all_entries))
pd = PhaseDiagram(all_entries + [entry1, entry2])
terminal_formulas = [entry1.composition.reduced_formula,
entry2.composition.reduced_formula]
logger.debug("%d stable entries" % len(pd.stable_entries))
logger.debug("%d facets" % len(pd.facets))
logger.debug("%d qhull_entries" % len(pd.qhull_entries))
rxn_entries = []
done = []
fmt = lambda fl: float_fmt % fl
for facet in pd.facets:
for face in itertools.combinations(facet, len(facet) - 1):
face_entries = [pd.qhull_entries[i] for i in face]
if any([e.composition.reduced_formula in terminal_formulas
for e in face_entries]):
continue
try:
m = []
for e in face_entries:
m.append([e.composition.get_atomic_fraction(el)
for el in elements])
m.append(comp_vec2 - comp_vec1)
m = np.array(m).T
coeffs = np.linalg.solve(m, comp_vec2)
x = coeffs[-1]
if all([c >= -tol for c in coeffs]) and \
(abs(sum(coeffs[:-1]) - 1) < tol) and \
(tol < x < 1 - tol):
c1 = x / r1.num_atoms
c2 = (1 - x) / r2.num_atoms
factor = 1 / (c1 + c2)
c1 *= factor
c2 *= factor
# Avoid duplicate reactions.
if any([np.allclose([c1, c2], cc) for cc in done]):
continue
done.append((c1, c2))
rxn_str = "%s %s + %s %s -> " % (
fmt(c1), r1.reduced_formula,
fmt(c2), r2.reduced_formula)
products = []
energy = - (x * entry1.energy_per_atom +
(1 - x) * entry2.energy_per_atom)
for c, e in zip(coeffs[:-1], face_entries):
if c > tol:
r = e.composition.reduced_composition
products.append("%s %s" % (
fmt(c / r.num_atoms * factor),
r.reduced_formula))
energy += c * e.energy_per_atom
rxn_str += " + ".join(products)
comp = x * comp_vec1 + (1 - x) * comp_vec2
entry = PDEntry(
Composition(dict(zip(elements, comp))),
energy=energy, attribute=rxn_str)
rxn_entries.append(entry)
except np.linalg.LinAlgError as ex:
logger.debug("Reactants = %s" % (", ".join([
entry1.composition.reduced_formula,
entry2.composition.reduced_formula])))
logger.debug("Products = %s" % (
", ".join([e.composition.reduced_formula
for e in face_entries])))
rxn_entries = sorted(rxn_entries, key=lambda e: e.name, reverse=True)
self.entry1 = entry1
self.entry2 = entry2
self.rxn_entries = rxn_entries
self.labels = collections.OrderedDict()
for i, e in enumerate(rxn_entries):
self.labels[str(i + 1)] = e.attribute
e.name = str(i + 1)
self.all_entries = all_entries
self.pd = pd
def get_compound_pd(self):
"""
Get the CompoundPhaseDiagram object, which can then be used for
plotting.
Returns:
(CompoundPhaseDiagram)
"""
# For this plot, since the reactions are reported in formation
# energies, we need to set the energies of the terminal compositions
# to 0. So we create copies with 0 energy.
entry1 = PDEntry(self.entry1.composition, 0)
entry2 = PDEntry(self.entry2.composition, 0)
cpd = CompoundPhaseDiagram(
self.rxn_entries + [entry1, entry2],
[Composition(entry1.composition.reduced_formula),
Composition(entry2.composition.reduced_formula)],
normalize_terminal_compositions=False)
return cpd
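# Hypothetical sketch, assuming pre-corrected ComputedEntry objects for an
# electrode, an electrolyte and the surrounding chemical system:
#
#     rd = ReactionDiagram(electrode_entry, electrolyte_entry, other_entries)
#     for label, rxn_str in rd.labels.items():
#         print(label, rxn_str)            # numbered candidate reactions
#     cpd = rd.get_compound_pd()           # for plotting with PDPlotter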
class PhaseDiagramError(Exception):
"""
An exception class for Phase Diagram generation.
"""
pass
def get_facets(qhull_data, joggle=False):
"""
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
List of simplices of the Convex Hull.
"""
if joggle:
return ConvexHull(qhull_data, qhull_options="QJ i").simplices
else:
return ConvexHull(qhull_data, qhull_options="Qt i").simplices
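# Hypothetical sketch (`qhull_data` assumed to be an (N, d) float array):
#
#     simplices = get_facets(qhull_data)
#     simplices_robust = get_facets(qhull_data, joggle=True)   # joggle to dodge precision issues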
class PDPlotter:
"""
A plotter class for phase diagrams.
Args:
phasediagram: PhaseDiagram object.
show_unstable (float): Whether unstable phases will be plotted (as
red crosses). If a number > 0 is entered, all phases with
ehull < show_unstable will be shown.
\\*\\*plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
be used to customize markers etc. If not set, the default is
{
"markerfacecolor": (0.2157, 0.4941, 0.7216),
"markersize": 10,
"linewidth": 3
}
"""
def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
# note: palettable imports matplotlib
from palettable.colorbrewer.qualitative import Set1_3
self._pd = phasediagram
self._dim = len(self._pd.elements)
if self._dim > 4:
raise ValueError("Only 1-4 components supported!")
self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
[[self._pd.facets[0][0], self._pd.facets[0][0]]]
self.show_unstable = show_unstable
colors = Set1_3.mpl_colors
self.plotkwargs = plotkwargs or {
"markerfacecolor": colors[2],
"markersize": 10,
"linewidth": 3
}
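# Hypothetical sketch: plot an existing PhaseDiagram `pd`, also showing phases
# within 0.2 eV/atom of the hull (threshold purely illustrative):
#
#     plotter = PDPlotter(pd, show_unstable=0.2, markersize=12)
#     plotter.show()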
@property
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False, plt=None):
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap, plt=plt,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
def plot_element_profile(self, element, comp, show_label_index=None,
xlim=5):
"""
Draw the element profile plot for a composition varying different
chemical potential of an element.
X value is the negative of the chemical potential referenced to the
elemental chemical potential. For example, if you choose Element("Li"),
X = -(µLi-µLi0), which corresponds to the voltage versus a metal anode.
Y values represent the element uptake in this composition
(unit: per atom). All reactions are printed to help you choose the
profile steps you want to label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The labels for reaction products
you want to show in the plot. Defaults to None (no annotation
for reaction products). For each profile step you want
labeled, add its index to show_label_index. The
profile steps count from zero. For example, set
show_label_index=[0, 2, 5] to label profile steps 0, 2 and 5.
xlim (float): The max x value. x ranges from 0 to xlim. Defaults to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
"""
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
for i, d in enumerate(evolution):
v = -(d["chempot"] - element_energy)
print ("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
if i != 0:
plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
'k', linewidth=2.5)
x1 = v
y1 = d["evolution"] / num_atoms
if i != len(evolution) - 1:
x2 = - (evolution[i + 1]["chempot"] - element_energy)
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
for p in d["reaction"].products
if p.reduced_formula != element.symbol]
plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
plt.xlim((0, xlim))
plt.xlabel("-$\\Delta{\\mu}$ (eV)")
plt.ylabel("Uptake per atom")
return plt
def show(self, *args, **kwargs):
"""
Draws the phase diagram using Matplotlib and show it.
Args:
\\*args: Passed to get_plot.
\\*\\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False, plt=None):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since matplotlib is a fairly expensive library to load and not all
machines have it installed, I have done it this way.
"""
if plt is None:
plt = pretty_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", **self.plotkwargs)
else:
plt.plot(x, y, "k*", **self.plotkwargs)
else:
for x, y in lines:
plt.plot(x, y, "ko-", **self.plotkwargs)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
for x, y in lines:
plt.plot(x, y, "k-", markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [self._pd.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
# The following defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
energies_unstable = [self._pd.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
ehull = self._pd.get_e_above_hull(entry)
if ehull < self.show_unstable:
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
ticks = cbar.ax.get_yticklabels()
# cbar.ax.set_yticklabels(['${v}$'.format(
# v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt
def _get_3d_plot(self, label_stable=True):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since matplotlib is a fairly expensive library to load and not all
machines have it installed, I have done it this way.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt
def write_image(self, stream, image_format="svg", **kwargs):
"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format:
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
\\*\\*kwargs: Pass through to get_plot function.
"""
plt = self.get_plot(**kwargs)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
Plot the chemical potential range map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
def get_chempot_range_map_plot(self, elements, referenced=True):
"""
Returns a plot of the chemical potential range map. Currently works
only for 3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(12, 8)
chempot_ranges = self._pd.get_chempot_range_map(
elements, referenced=referenced)
missing_lines = {}
excluded_region = []
for entry, lines in chempot_ranges.items():
comp = entry.composition
center_x = 0
center_y = 0
coords = []
contain_zero = any([comp.get_atomic_fraction(el) == 0
for el in elements])
is_boundary = (not contain_zero) and \
sum([comp.get_atomic_fraction(el) for el in
elements]) == 1
for line in lines:
(x, y) = line.coords.transpose()
plt.plot(x, y, "k-")
for coord in line.coords:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
center_x += coord[0]
center_y += coord[1]
if is_boundary:
excluded_region.extend(line.coords)
if coords and contain_zero:
missing_lines[entry] = coords
else:
xy = (center_x / len(coords), center_y / len(coords))
plt.annotate(latexify(entry.name), xy, fontsize=22)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Shade the forbidden chemical potential regions.
excluded_region.append([xlim[1], ylim[1]])
excluded_region = sorted(excluded_region, key=lambda c: c[0])
(x, y) = np.transpose(excluded_region)
plt.fill(x, y, "0.80")
# The hull does not generate the missing horizontal and vertical lines.
# The following code fixes this.
el0 = elements[0]
el1 = elements[1]
for entry, coords in missing_lines.items():
center_x = sum([c[0] for c in coords])
center_y = sum([c[1] for c in coords])
comp = entry.composition
is_x = comp.get_atomic_fraction(el0) < 0.01
is_y = comp.get_atomic_fraction(el1) < 0.01
n = len(coords)
if not (is_x and is_y):
if is_x:
coords = sorted(coords, key=lambda c: c[1])
for i in [0, -1]:
x = [min(xlim), coords[i][0]]
y = [coords[i][1], coords[i][1]]
plt.plot(x, y, "k")
center_x += min(xlim)
center_y += coords[i][1]
elif is_y:
coords = sorted(coords, key=lambda c: c[0])
for i in [0, -1]:
x = [coords[i][0], coords[i][0]]
y = [coords[i][1], min(ylim)]
plt.plot(x, y, "k")
center_x += coords[i][0]
center_y += min(ylim)
xy = (center_x / (n + 2), center_y / (n + 2))
else:
center_x = sum(coord[0] for coord in coords) + xlim[0]
center_y = sum(coord[1] for coord in coords) + ylim[0]
xy = (center_x / (n + 1), center_y / (n + 1))
plt.annotate(latexify(entry.name), xy,
horizontalalignment="center",
verticalalignment="center", fontsize=22)
plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el0.symbol))
plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el1.symbol))
plt.tight_layout()
return plt
def get_contour_pd_plot(self):
"""
Plot a contour phase diagram plot, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = self._pd.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt
def uniquelines(q):
"""
Given all the facets, convert them into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
"""
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
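# Small worked example of the facet-to-line conversion:
#
#     >>> sorted(uniquelines([[1, 2, 3], [3, 6, 7]]))
#     [(1, 2), (1, 3), (2, 3), (3, 6), (3, 7), (6, 7)]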
def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
coord: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def tet_coord(coord):
"""
Convert a 3D coordinate into a tetrahedron based coordinate system for a
prettier phase diagram.
Args:
coord: coordinate used in the convex hull computation.
Returns:
coordinates in a tetrahedron-based coordinate system.
"""
unitvec = np.array([[1, 0, 0], [0.5, math.sqrt(3) / 2, 0],
[0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
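# Small sketch of both coordinate transforms (inputs are illustrative):
#
#     xs, ys = triangular_coord([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     # xs -> [0.0, 1.0, 0.5], ys -> [0.0, 0.0, ~0.866]: corners of an equilateral triangle
#     xs, ys, zs = tet_coord([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])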
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
"""
Orders the entries (their coordinates) in a phase diagram plot according
to the user specified ordering.
Ordering should be given as ['Up', 'Left', 'Right'], where Up,
Left and Right are the names of the entries in the upper, left and right
corners of the triangle respectively.
Args:
lines: list of list of coordinates for lines in the PD.
stable_entries: {coordinate : entry} for each stable node in the
phase diagram. (Each coordinate can only have one stable phase)
unstable_entries: {entry: coordinates} for all unstable nodes in the
phase diagram.
ordering: Ordering of the phase diagram, given as a list ['Up',
'Left','Right']
Returns:
(newlines, newstable_entries, newunstable_entries):
- newlines is a list of list of coordinates for lines in the PD.
- newstable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- newunstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
yup = -1000.0
xleft = 1000.0
xright = -1000.0
for coord in stable_entries:
if coord[0] > xright:
xright = coord[0]
nameright = stable_entries[coord].name
if coord[0] < xleft:
xleft = coord[0]
nameleft = stable_entries[coord].name
if coord[1] > yup:
yup = coord[1]
nameup = stable_entries[coord].name
if (nameup not in ordering) or (nameright not in ordering) or \
(nameleft not in ordering):
raise ValueError(
'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
'right}"'
' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
right=nameright,
ord=ordering))
cc = np.array([0.5, np.sqrt(3.0) / 6.0], float)
if nameup == ordering[0]:
if nameleft == ordering[1]:
# The coordinates were already in the user ordering
return lines, stable_entries, unstable_entries
else:
newlines = [[np.array(1.0 - x), y] for x, y in lines]
newstable_entries = {(1.0 - c[0], c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (1.0 - c[0], c[1])
for entry, c in
unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[1]:
if nameleft == ordering[2]:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[2]:
if nameleft == ordering[0]:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c240 * xx - s240 * y[ii]
newy[ii] = -s240 * xx + c240 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
| mit |
tomlof/scikit-learn | sklearn/model_selection/_validation.py | 6 | 38471 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
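# Hypothetical sketch: scoring a single train/test split the way cross_val_score
# does internally (X, y and `estimator` are assumed to exist):
#
#     cv = check_cv(3, y, classifier=is_classifier(estimator))
#     train, test = next(cv.split(X, y))
#     scorer = check_scoring(estimator)
#     [test_score] = _fit_and_score(clone(estimator), X, y, scorer, train, test,
#                                   verbose=0, parameters=None, fit_params=None)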
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
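# Illustrative sketch (not part of the original scikit-learn source): with
# method='predict_proba', cross_val_predict returns one column per class, in
# sorted class order. LogisticRegression and the iris dataset are assumed only
# for this demo.
def _sketch_cross_val_predict_proba():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    proba = cross_val_predict(LogisticRegression(), iris.data, iris.target,
                              cv=3, method='predict_proba')
    # one row per sample, one column per class (iris has 3); each row sums to ~1
    assert proba.shape == (iris.data.shape[0], 3)
    return proba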
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
predictions_ = np.zeros((X_test.shape[0], n_classes))
if method == 'decision_function' and len(estimator.classes_) == 2:
predictions_[:, estimator.classes_[-1]] = predictions
else:
predictions_[:, estimator.classes_] = predictions
predictions = predictions_
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
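# Illustrative sketch (not part of the original scikit-learn source): the helper
# above accepts any reordering of np.arange(n_samples) and rejects anything else.
def _sketch_check_is_permutation():
    assert _check_is_permutation(np.array([2, 0, 1]), 3)        # a reordering
    assert not _check_is_permutation(np.array([0, 0, 1]), 3)    # repeated index
    assert not _check_is_permutation(np.array([0, 1]), 3)       # wrong length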
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
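# Illustrative sketch (not part of the original scikit-learn source): per-sample
# fit parameters (e.g. sample_weight) are sliced to the given indices, while
# scalars and other non sample-aligned values pass through unchanged.
def _sketch_index_param_value():
    X_demo = np.zeros((4, 2))
    train_idx = np.array([0, 2])
    weights = np.array([1., 2., 3., 4.])
    assert np.array_equal(_index_param_value(X_demo, weights, train_idx),
                          np.array([1., 3.]))
    assert _index_param_value(X_demo, 5, train_idx) == 5  # scalar passes through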
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a problem with nosetests
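# Illustrative sketch (not part of the original scikit-learn source): a typical
# call returns the unpermuted score, the permutation scores, and the p-value.
# SVC with a linear kernel and the iris dataset are assumed only for this demo.
def _sketch_permutation_test_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=30, random_state=0)
    assert perm_scores.shape == (30,)
    assert 0.0 < pvalue <= 1.0
    return score, pvalue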
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
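# Illustrative sketch (not part of the original scikit-learn source): with groups
# given, values are only shuffled among samples sharing the same group label.
def _sketch_shuffle_within_groups():
    rng = np.random.RandomState(0)
    y_demo = np.array([0, 1, 2, 3, 4, 5])
    groups_demo = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    shuffled = _shuffle(y_demo, groups_demo, rng)
    # the first three values stay within {0, 1, 2}, the last three within {3, 4, 5}
    assert set(shuffled[:3]) == {0, 1, 2} and set(shuffled[3:]) == {3, 4, 5}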
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
based on ``train_sizes``.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
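# Illustrative sketch (not part of the original scikit-learn source): a typical
# call, averaging the per-fold scores to get one training and one validation
# curve. GaussianNB and the digits dataset are assumed only for this demo.
def _sketch_learning_curve():
    from sklearn.datasets import load_digits
    from sklearn.naive_bayes import GaussianNB
    digits = load_digits()
    sizes, train_scores, test_scores = learning_curve(
        GaussianNB(), digits.data, digits.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    # one row per training-set size, one column per CV fold
    assert train_scores.shape == (len(sizes), 5)
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)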
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
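# Illustrative sketch (not part of the original scikit-learn source): scores for
# each value of a single hyper-parameter, here SVC's gamma on the iris dataset
# (both assumed only for this demo); rows follow param_range, columns the CV folds.
def _sketch_validation_curve():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        SVC(), iris.data, iris.target,
        param_name='gamma', param_range=param_range, cv=5)
    assert train_scores.shape == (len(param_range), 5)
    return train_scores.mean(axis=1), test_scores.mean(axis=1)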
| bsd-3-clause |
parekhmitchell/Machine-Learning | Machine Learning A-Z Template Folder/Part 2 - Regression/Section 6 - Polynomial Regression/polynomial_regression.py | 4 | 2115 | # Polynomial Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
# Visualising the Linear Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg.predict(X), color = 'blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Predicting a new result with Linear Regression
lin_reg.predict(6.5)
# Predicting a new result with Polynomial Regression
lin_reg_2.predict(poly_reg.fit_transform(6.5)) | mit |
kaiserroll14/301finalproject | main/pandas/tests/test_multilevel.py | 9 | 90175 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
import datetime
import itertools
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
tm._skip_if_no_pytz()
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3, tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# GH 7112
import pytz
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
self.assertTrue(result.equals(expected))
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
self.assertTrue(result.equals(expected))
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)])
self.assertTrue(result.equals(expected))
result = midx_lv2.append(midx_lv3)
self.assertTrue(result.equals(expected))
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')]
+ expected_tuples), None)
self.assertTrue(result.equals(expected))
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assertIsInstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = self.round_trip_pickle(frame)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEqual(result.index.names, self.frame.index.names)
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3,4)
for gen, extra in [([1.,3.,2.,5.],4.),
([1,3,2,5],4),
([Timestamp('20130101'),Timestamp('20130103'),Timestamp('20130102'),Timestamp('20130105')],Timestamp('20130104')),
(['1one','3one','2one','5one'],'4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,DataFrame('world',
index=list('def'),
columns=MultiIndex.from_tuples([('red', extra)]))],axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of 'red'
self.assertEqual(str(df2).splitlines()[0].split(),['red'])
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:,[0,2,1,3]]
assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:,[0,2,1,4,3]]
assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red',extra)] = 'world'
result = result.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assertTrue(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEqual(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assertTrue(isnull(s.values[42:65]).all())
self.assertTrue(notnull(s.values[:42]).all())
self.assertTrue(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assertTrue(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assertTrue((cp.values[:4] == 0).all())
self.assertTrue((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
sliced_a1 = df['A', '1']
sliced_a2 = df['A', '2']
sliced_b1 = df['B', '1']
assert_series_equal(sliced_a1, sliced_b1, check_names=False)
assert_series_equal(sliced_a2, sliced_b1, check_names=False)
self.assertEqual(sliced_a1.name, ('A', '1'))
self.assertEqual(sliced_a2.name, ('A', '2'))
self.assertEqual(sliced_b1.name, ('B', '1'))
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assertTrue((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assertTrue((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assertIsInstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEqual(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEqual(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
# preserve names
self.assertEqual(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assertTrue((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assertTrue(com.is_integer_dtype(deleveled['prm1']))
self.assertTrue(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEqual(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assertIsInstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assertIsInstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with assertRaisesRegexp(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with assertRaisesRegexp(IndexError, "not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
# GH10417
def check(left, right):
assert_series_equal(left, right)
self.assertFalse(left.index.is_unique)
li, ri = left.index, right.index
for i in range(ri.nlevels):
tm.assert_numpy_array_equal(li.levels[i], ri.levels[i])
tm.assert_numpy_array_equal(li.labels[i], ri.labels[i])
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
labels=[np.tile(np.arange(2).repeat(3), 2),
np.tile(np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']],
labels=[np.tile(np.arange(2).repeat(3), 2),
np.tile([0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
labels=[np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
result = df['foo'].stack()
assert_series_equal(stacked['foo'], result, check_names=False)
self.assertIs(result.name, None)
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEqual(unstacked.index.name, 'first')
self.assertEqual(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEqual(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEqual(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEqual(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with assertRaisesRegexp(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with assertRaisesRegexp(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with assertRaisesRegexp(IndexError, "not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEqual(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assertTrue((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assertFalse(np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assertFalse(swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assertTrue(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assertIsInstance(df.columns, MultiIndex)
self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assertTrue(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assertFalse(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assertFalse(index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assertTrue((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
        # this will work, but will raise/warn as it's chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assertTrue((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'b')
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'a')
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assertTrue(leftside._get_axis(axis).equals(level_index))
self.assertTrue(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEqual(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEqual(result.index.names, self.ymd.index.names[1:])
self.assertEqual(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEqual(result.index.name, self.ymd.index.names[2])
self.assertEqual(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEqual(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assertTrue((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result, check_names=False)
assert_frame_equal(df1, df2)
self.assertEqual(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples(
[('1a', '2a'), ('1a', '2b'), ('1a', '2c')])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assertTrue(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assertTrue((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assertTrue((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
s1 = Series([1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3),
(2, 2), (2, 4)]))
s2 = Series([1, 2, 3, 4],
index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
df = DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
df = DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'),
('Colorado', 'Green')])
index = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.ix[:, 1]
exp = frame.loc[:, ('Ohio', 'Red')]
tm.assertIsInstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.ix[ix, "C"] = '_'
self.assertTrue((df.xs((1, 1))['C'] == '_').all())
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
self.assertEqual(s[("a", 5)], 5)
self.assertEqual(s[("a", 6)], 6)
self.assertEqual(s[("a", 7)], 7)
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
self.assertTrue(isnull(index[4][0]))
self.assertTrue(isnull(index.values[4][0]))
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo','demo','demo','demo']
idx = MultiIndex.from_tuples(idx_tp,names = ['STK_ID','RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
self.assertEqual(len(result), 3)
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo','bar',1.0,1],['foo','bar',2.0,2],['bah','bam',3.0,3],
['bah','bam',4.0,4],['foo','bar',5.0,5],['bah','bam',6.0,6]],
columns=list('ABCD'))
df = df.set_index(['A','B'])
df = df.sortlevel(0)
expected = DataFrame([['foo','bar',1.0,1],['foo','bar',2.0,2],['foo','bar',5.0,5]],
columns=list('ABCD')).set_index(['A','B'])
result = df.loc[('foo','bar')]
assert_frame_equal(result,expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2 ,3], [1, 1, 1, 1, 2, 2]))
expected = np.array([False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([1, 2, 3, 2 ,3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2 ,3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 2 ,3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
# deprecate take_last
expected = np.array([True, False, False, False, False, False])
with tm.assert_produces_warning(FutureWarning):
duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2 ,3], [1, 1, 1, 2, 2]))
with tm.assert_produces_warning(FutureWarning):
tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M', tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'], tz='Asia/Tokyo')
self.assertTrue(idx.levels[0].equals(expected1))
self.assertTrue(idx.levels[1].equals(idx2))
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product([date1,date2,date3],[date1,date2,date3]):
index = pd.MultiIndex.from_product([[d1],[d2]])
self.assertIsInstance(index.levels[0],pd.DatetimeIndex)
self.assertIsInstance(index.levels[1],pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame({'label':['a', 'a', 'a', 'b', 'b', 'b'],
'datetime':['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value':range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00'])
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
self.assertTrue(df.index.levels[0].equals(expected))
self.assertTrue(df.index.levels[1].equals(pd.Index(['a', 'b'])))
df = df.swaplevel(0, 1)
self.assertTrue(df.index.levels[0].equals(pd.Index(['a', 'b'])))
self.assertTrue(df.index.levels[1].equals(expected))
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'], tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-02 09:00', '2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'], tz='US/Eastern')
self.assertTrue(df.index.levels[0].equals(expected1))
self.assertTrue(df.index.levels[1].equals(expected2))
self.assertTrue(df.index.levels[2].equals(idx3))
# GH 7092
self.assertTrue(df.index.get_level_values(0).equals(idx1))
self.assertTrue(df.index.get_level_values(1).equals(idx2))
self.assertTrue(df.index.get_level_values(2).equals(idx3))
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx1')
idx2 = pd.Index(range(5), name='idx2',dtype='int64')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame({'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5,dtype='int64'),
'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS', tz='Europe/Paris', name='idx3')
idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3])
df = pd.DataFrame({'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5,dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(lambda d: pd.Timestamp(d, tz='Europe/Paris'))
assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = pd.MultiIndex.from_product([['a','b'], pd.date_range('20130101', periods=3, tz=tz)])
df = pd.DataFrame(np.arange(6,dtype='int64').reshape(6,1), columns=['a'], index=idx)
expected = pd.DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(lambda d: pd.Timestamp(d, offset='D', tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = pd.MultiIndex.from_product([pd.period_range('20130101', periods=3, freq='M'),
['a','b','c']], names=['month', 'feature'])
df = pd.DataFrame(np.arange(9,dtype='int64').reshape(-1,1), index=idx, columns=['a'])
expected = pd.DataFrame({'month': [pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3,
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')},
columns=['month', 'feature', 'a'])
assert_frame_equal(df.reset_index(), expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='Y')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
self.assertTrue(df.index.levels[0].equals(expected1))
self.assertTrue(df.index.levels[1].equals(expected2))
self.assertTrue(df.index.levels[2].equals(idx3))
self.assertTrue(df.index.get_level_values(0).equals(idx1))
self.assertTrue(df.index.get_level_values(1).equals(idx2))
self.assertTrue(df.index.get_level_values(2).equals(idx3))
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = pd.MultiIndex.from_tuples([(1, 2), (3, 4),
(5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = pd.Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data),)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
tmhm/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
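# Minimal sketch (not part of the original example): the Vandermonde-style
# matrix described in the docstring, built with PolynomialFeatures on a tiny
# toy input. The input values below are purely illustrative.
_vandermonde_demo = PolynomialFeatures(degree=2).fit_transform(
    np.array([[2.0], [3.0]]))
# _vandermonde_demo is [[1., 2., 4.],
#                       [1., 3., 9.]]  -> columns are [1, x, x**2]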
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/indexes/multi/test_set_ops.py | 2 | 6118 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import MultiIndex, Series
def test_setops_errorcases(idx):
# # non-iterable input
cases = [0.5, 'xxx']
methods = [idx.intersection, idx.union, idx.difference,
idx.symmetric_difference]
for method in methods:
for case in cases:
tm.assert_raises_regex(TypeError,
"Input must be Index "
"or array-like",
method, case)
def test_intersection_base(idx):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.intersection([1, 2, 3])
def test_union_base(idx):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
assert tm.equalContents(result, everything)
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.union([1, 2, 3])
def test_difference_base(idx):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.difference(case)
assert tm.equalContents(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.difference([1, 2, 3])
def test_symmetric_difference(idx):
first = idx[1:]
second = idx[:-1]
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
first.symmetric_difference([1, 2, 3])
def test_empty(idx):
# GH 15270
assert not idx.empty
assert idx[:0].empty
def test_difference(idx):
first = idx
result = first.difference(idx[-3:])
expected = MultiIndex.from_tuples(sorted(idx[:-3].values),
sortorder=0,
names=idx.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == idx.names
# empty difference: reflexive
result = idx.difference(idx)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: superset
result = idx[-3:].difference(idx)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: degenerate
result = idx[:0].difference(idx)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# names not the same
chunklet = idx[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = idx.difference(idx.sortlevel(1)[0])
assert len(result) == 0
    # difference called with a non-MultiIndex (plain values) does not raise
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_union(idx):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_union = piece1 | piece2
tups = sorted(idx.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = idx.union(idx)
assert the_union is idx
the_union = idx.union(idx[:0])
assert the_union is idx
# won't work in python 3
# tuples = _index.values
# result = _index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(idx)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = _index.union(other)
# assert result.equals(result2)
def test_intersection(idx):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_int = piece1 & piece2
tups = sorted(idx[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = idx.intersection(idx)
assert the_int is idx
# empty intersection: disjoint
empty = idx[:2] & idx[2:]
expected = idx[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = _index.values
# result = _index & tuples
# assert result.equals(tuples)
| bsd-3-clause |
KnHuq/Dynamic-Tensorflow-Tutorial | BiDirectional LSTM/bi_directional_lstm.py | 2 | 12777 | import tensorflow as tf
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import pylab as pl
from IPython import display
import sys
# # Bi-LSTM class and functions
class Bi_LSTM_cell(object):
"""
    Bidirectional LSTM cell object which takes 3 arguments for initialization.
input_size = Input Vector size
hidden_layer_size = Hidden layer size
target_size = Output vector size
"""
def __init__(self, input_size, hidden_layer_size, target_size):
# Initialization of given values
self.input_size = input_size
self.hidden_layer_size = hidden_layer_size
self.target_size = target_size
# Weights and Bias for input and hidden tensor for forward pass
self.Wi = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Ui = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bi = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wf = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Uf = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bf = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wog = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Uog = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bog = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wc = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Uc = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bc = tf.Variable(tf.zeros([self.hidden_layer_size]))
# Weights and Bias for input and hidden tensor for backward pass
self.Wi1 = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Ui1 = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bi1 = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wf1 = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Uf1 = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bf1 = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wog1 = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Uog1 = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bog1 = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wc1 = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Uc1 = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bc1 = tf.Variable(tf.zeros([self.hidden_layer_size]))
# Weights for output layers
self.Wo = tf.Variable(tf.truncated_normal(
[self.hidden_layer_size * 2, self.target_size],
mean=0, stddev=.01))
self.bo = tf.Variable(tf.truncated_normal(
[self.target_size], mean=0, stddev=.01))
# Placeholder for input vector with shape[batch, seq, embeddings]
self._inputs = tf.placeholder(tf.float32,
shape=[None, None, self.input_size],
name='inputs')
# Reversing the inputs by sequence for backward pass of the LSTM
self._inputs_rev = tf.reverse(self._inputs, [False, True, False])
# Processing inputs to work with scan function
self.processed_input = process_batch_input_for_RNN(self._inputs)
        # For backward pass of the LSTM
self.processed_input_rev = process_batch_input_for_RNN(
self._inputs_rev)
'''
        The initial hidden state would naturally have shape
        [1, self.hidden_layer_size]. At the first time step, the dot
        product with the weights should yield a tensor of shape
        [batch_size, self.hidden_layer_size]; TensorFlow broadcasts to
        make this work, but that broadcast triggers a low-level error
        during back-propagation. To avoid it, the initial hidden state
        must be created directly with shape
        [batch_size, self.hidden_layer_size]. The small hack below
        multiplies the first input slice by a zero matrix to obtain a
        zero-valued initial hidden state of the right shape.
'''
self.initial_hidden = self._inputs[:, 0, :]
self.initial_hidden = tf.matmul(
self.initial_hidden, tf.zeros([input_size, hidden_layer_size]))
self.initial_hidden = tf.stack(
[self.initial_hidden, self.initial_hidden])
# Function for Forward LSTM cell.
def Lstm_f(self, previous_hidden_memory_tuple, x):
"""
        Takes the previous (hidden state, memory cell) pair and the
        current input, and returns the updated pair for the forward
        direction.
"""
previous_hidden_state, c_prev = tf.unstack(previous_hidden_memory_tuple)
# Input Gate
i = tf.sigmoid(
tf.matmul(x, self.Wi) +
tf.matmul(previous_hidden_state, self.Ui) + self.bi
)
# Forget Gate
f = tf.sigmoid(
tf.matmul(x, self.Wf) +
tf.matmul(previous_hidden_state, self.Uf) + self.bf
)
# Output Gate
o = tf.sigmoid(
tf.matmul(x, self.Wog) +
tf.matmul(previous_hidden_state, self.Uog) + self.bog
)
# New Memory Cell
c_ = tf.nn.tanh(
tf.matmul(x, self.Wc) +
tf.matmul(previous_hidden_state, self.Uc) + self.bc
)
# Final Memory cell
c = f * c_prev + i * c_
# Current Hidden state
current_hidden_state = o * tf.nn.tanh(c)
return tf.stack([current_hidden_state, c])
    # Function for Backward LSTM cell.
def Lstm_b(self, previous_hidden_memory_tuple, x):
"""
        Takes the previous (hidden state, memory cell) pair and the
        current input, and returns the updated pair for the backward
        direction.
"""
previous_hidden_state, c_prev = tf.unstack(previous_hidden_memory_tuple)
# Input Gate
i = tf.sigmoid(
tf.matmul(x, self.Wi1) +
tf.matmul(previous_hidden_state, self.Ui1) + self.bi1
)
# Forget Gate
f = tf.sigmoid(
tf.matmul(x, self.Wf1) +
tf.matmul(previous_hidden_state, self.Uf1) + self.bf1
)
# Output Gate
o = tf.sigmoid(
tf.matmul(x, self.Wog1) +
tf.matmul(previous_hidden_state, self.Uog1) + self.bog1
)
# New Memory Cell
c_ = tf.nn.tanh(
tf.matmul(x, self.Wc1) +
tf.matmul(previous_hidden_state, self.Uc1) + self.bc1
)
# Final Memory cell
c = f * c_prev + i * c_
# Current Hidden state
current_hidden_state = o * tf.nn.tanh(c)
return tf.stack([current_hidden_state, c])
# Function to get the hidden and memory cells after forward pass
def get_states_f(self):
"""
        Iterates through time / sequence to get all hidden states
        """
        # Getting all hidden states through time
all_hidden_memory_states = tf.scan(self.Lstm_f,
self.processed_input,
initializer=self.initial_hidden,
name='states')
all_hidden_states = all_hidden_memory_states[:, 0, :, :]
all_memory_states = all_hidden_memory_states[:, 1, :, :]
return all_hidden_states, all_memory_states
# Function to get the hidden and memory cells after backward pass
def get_states_b(self):
"""
        Iterates through time / sequence to get all hidden states
"""
all_hidden_states, all_memory_states = self.get_states_f()
        # Taking the final hidden and memory states of the forward pass
last_hidden_states = all_hidden_states[-1]
last_memory_states = all_memory_states[-1]
# For backward pass using the last hidden and memory of the forward
# pass
initial_hidden = tf.stack([last_hidden_states, last_memory_states])
        # Getting all hidden states through time (backward pass)
all_hidden_memory_states = tf.scan(self.Lstm_b,
self.processed_input_rev,
initializer=initial_hidden,
name='states')
        # Reversing the backward-pass states so they line up with the
        # original (forward) time order before returning them
        all_hidden_states = tf.reverse(all_hidden_memory_states[
            :, 0, :, :], [True, False, False])
        all_memory_states = tf.reverse(all_hidden_memory_states[
            :, 1, :, :], [True, False, False])
        return all_hidden_states, all_memory_states
    # Function to concatenate the hidden states of the forward and backward passes
def get_concat_hidden(self):
# Getting hidden and memory for the forward pass
all_hidden_states_f, all_memory_states_f = self.get_states_f()
# Getting hidden and memory for the backward pass
all_hidden_states_b, all_memory_states_b = self.get_states_b()
# Concating the hidden states of forward and backward pass
concat_hidden = tf.concat(
[all_hidden_states_f, all_hidden_states_b],2)
return concat_hidden
# Function to get output from a hidden layer
def get_output(self, hidden_state):
"""
This function takes hidden state and returns output
"""
output = tf.nn.sigmoid(tf.matmul(hidden_state, self.Wo) + self.bo)
return output
# Function for getting all output layers
def get_outputs(self):
"""
        Iterating through hidden states to get outputs for all timestamps
"""
all_hidden_states = self.get_concat_hidden()
all_outputs = tf.map_fn(self.get_output, all_hidden_states)
return all_outputs
# Function to convert batch input data to use scan ops of tensorflow.
def process_batch_input_for_RNN(batch_input):
"""
Process tensor of size [5,3,2] to [3,5,2]
"""
batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])
X = tf.transpose(batch_input_)
return X
# # Placeholder and initializers
hidden_layer_size = 30
input_size = 8
target_size = 10
y = tf.placeholder(tf.float32, shape=[None, target_size], name='inputs')
# # Models
# Initializing rnn object
rnn = Bi_LSTM_cell(input_size, hidden_layer_size, target_size)
# Getting all outputs from rnn
outputs = rnn.get_outputs()
# Getting the last output through indexing
last_output = outputs[-1]
# The per-timestep outputs of the model pass through a sigmoid activation, so a
# softmax is applied to the last output to obtain the final class probabilities.
output = tf.nn.softmax(last_output)
# Computing the Cross Entropy loss
cross_entropy = -tf.reduce_sum(y * tf.log(output))
# Training with the Adam optimizer
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
# Calculation of correct predictions and accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(output, 1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32))) * 100
# # Dataset Preparation
# Function to get a one-hot vector
def get_on_hot(number):
on_hot = [0] * 10
on_hot[number] = 1
return on_hot
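# e.g. get_on_hot(3) -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]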
# Using sklearn's digits dataset (a small MNIST-like set).
digits = datasets.load_digits()
X = digits.images
Y_ = digits.target
Y = map(get_on_hot, Y_)
# Getting Train and test Dataset
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.22, random_state=42)
# Cutting for simple iteration
X_train = X_train[:1400]
y_train = y_train[:1400]
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
# Iterations to do training
for epoch in range(200):
start = 0
end = 100
for i in range(14):
X = X_train[start:end]
Y = y_train[start:end]
start = end
end = start + 100
sess.run(train_step, feed_dict={rnn._inputs: X, y: Y})
Loss = str(sess.run(cross_entropy, feed_dict={rnn._inputs: X, y: Y}))
Train_accuracy = str(sess.run(accuracy, feed_dict={
rnn._inputs: X_train[:500], y: y_train[:500]}))
Test_accuracy = str(sess.run(accuracy, feed_dict={
rnn._inputs: X_test, y: y_test}))
sys.stdout.flush()
print("\rIteration: %s Loss: %s Train Accuracy: %s Test Accuracy: %s" %
(epoch, Loss, Train_accuracy, Test_accuracy)),
sys.stdout.flush()
| mit |
btabibian/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
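# Illustrative usage sketch (assumes any scikit-learn transformer instance):
#   fit_s, transform_s = bench_scikit_transformer(
#       np.random.rand(100, 20), GaussianRandomProjection(n_components=5))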
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
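# Illustrative usage sketch: a 100 x 50 matrix with up to 200 non-zero Gaussian
# entries (duplicate positions are summed), returned both dense and as CSR.
#   dense_demo, sparse_demo = make_sparse_random_data(100, 50, 200, random_state=0)
#   dense_demo.shape == (100, 50) and sp.issparse(sparse_demo)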
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            # Use the matrix selected above (dense or sparse, per --dense)
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |